From 573581523a1e0035cf0f3dc00bfe3f32ad12eb49 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:43 +0000 Subject: [PATCH 1/7] feat(aiplatform)!: update the API BREAKING CHANGE: This release has breaking changes. #### aiplatform:v1beta1 The following keys were deleted: - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.description - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.id - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.properties.videoEmbeddings.description - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.properties.videoEmbeddings.items.type - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.properties.videoEmbeddings.type - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.type - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.description - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.id - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.properties.response.$ref - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.properties.response.description - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.type - schemas.CloudAiLargeModelsVisionReasonVideoResponse.description - schemas.CloudAiLargeModelsVisionReasonVideoResponse.id - schemas.CloudAiLargeModelsVisionReasonVideoResponse.properties.responses.description - schemas.CloudAiLargeModelsVisionReasonVideoResponse.properties.responses.items.$ref - schemas.CloudAiLargeModelsVisionReasonVideoResponse.properties.responses.type - schemas.CloudAiLargeModelsVisionReasonVideoResponse.type - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.description - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.id - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.relativeTemporalPartition.$ref - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.relativeTemporalPartition.description - 
schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.text.description - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.text.type - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.type - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.description - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.id - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.endOffset.description - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.endOffset.format - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.endOffset.type - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.startOffset.description - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.startOffset.format - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.startOffset.type - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.type - schemas.CloudAiNlLlmProtoServiceCandidate.id - schemas.CloudAiNlLlmProtoServiceCandidate.properties.citationMetadata.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.citationMetadata.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.content.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.content.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishMessage.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishMessage.type - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.enum - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.enumDescriptions - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.type - schemas.CloudAiNlLlmProtoServiceCandidate.properties.groundingMetadata.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.groundingMetadata.description - 
schemas.CloudAiNlLlmProtoServiceCandidate.properties.index.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.index.format - schemas.CloudAiNlLlmProtoServiceCandidate.properties.index.type - schemas.CloudAiNlLlmProtoServiceCandidate.properties.safetyRatings.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.safetyRatings.items.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.safetyRatings.type - schemas.CloudAiNlLlmProtoServiceCandidate.type - schemas.CloudAiNlLlmProtoServiceCitation.description - schemas.CloudAiNlLlmProtoServiceCitation.id - schemas.CloudAiNlLlmProtoServiceCitation.properties.endIndex.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.endIndex.format - schemas.CloudAiNlLlmProtoServiceCitation.properties.endIndex.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.license.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.license.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.publicationDate.$ref - schemas.CloudAiNlLlmProtoServiceCitation.properties.publicationDate.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.startIndex.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.startIndex.format - schemas.CloudAiNlLlmProtoServiceCitation.properties.startIndex.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.title.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.title.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.uri.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.uri.type - schemas.CloudAiNlLlmProtoServiceCitation.type - schemas.CloudAiNlLlmProtoServiceCitationMetadata.description - schemas.CloudAiNlLlmProtoServiceCitationMetadata.id - schemas.CloudAiNlLlmProtoServiceCitationMetadata.properties.citations.description - schemas.CloudAiNlLlmProtoServiceCitationMetadata.properties.citations.items.$ref - schemas.CloudAiNlLlmProtoServiceCitationMetadata.properties.citations.type 
- schemas.CloudAiNlLlmProtoServiceCitationMetadata.type - schemas.CloudAiNlLlmProtoServiceContent.description - schemas.CloudAiNlLlmProtoServiceContent.id - schemas.CloudAiNlLlmProtoServiceContent.properties.isCached.description - schemas.CloudAiNlLlmProtoServiceContent.properties.isCached.type - schemas.CloudAiNlLlmProtoServiceContent.properties.parts.description - schemas.CloudAiNlLlmProtoServiceContent.properties.parts.items.$ref - schemas.CloudAiNlLlmProtoServiceContent.properties.parts.type - schemas.CloudAiNlLlmProtoServiceContent.properties.role.description - schemas.CloudAiNlLlmProtoServiceContent.properties.role.type - schemas.CloudAiNlLlmProtoServiceContent.type - schemas.CloudAiNlLlmProtoServiceFact.description - schemas.CloudAiNlLlmProtoServiceFact.id - schemas.CloudAiNlLlmProtoServiceFact.properties.query.description - schemas.CloudAiNlLlmProtoServiceFact.properties.query.type - schemas.CloudAiNlLlmProtoServiceFact.properties.summary.description - schemas.CloudAiNlLlmProtoServiceFact.properties.summary.type - schemas.CloudAiNlLlmProtoServiceFact.properties.title.description - schemas.CloudAiNlLlmProtoServiceFact.properties.title.type - schemas.CloudAiNlLlmProtoServiceFact.properties.url.description - schemas.CloudAiNlLlmProtoServiceFact.properties.url.type - schemas.CloudAiNlLlmProtoServiceFact.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.id - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.additionalProperties.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.additionalProperties.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.name.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.name.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.type - 
schemas.CloudAiNlLlmProtoServiceFunctionResponse.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.id - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.name.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.name.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.additionalProperties.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.additionalProperties.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.type - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.id - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.candidates.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.candidates.items.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.candidates.type - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.debugMetadata.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.debugMetadata.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.facts.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.facts.items.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.facts.type - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.promptFeedback.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.promptFeedback.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.reportingMetrics.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.reportingMetrics.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.usageMetadata.$ref - 
schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.usageMetadata.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.type - schemas.CloudAiNlLlmProtoServiceMessageMetadata.id - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.factualityDebugMetadata.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.factualityDebugMetadata.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.inputFilterInfo.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.inputFilterInfo.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.modelRoutingDecision.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.modelRoutingDecision.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.outputFilterInfo.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.outputFilterInfo.items.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.outputFilterInfo.type - schemas.CloudAiNlLlmProtoServiceMessageMetadata.type - schemas.CloudAiNlLlmProtoServicePart.description - schemas.CloudAiNlLlmProtoServicePart.id - schemas.CloudAiNlLlmProtoServicePart.properties.documentMetadata.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.documentMetadata.description - schemas.CloudAiNlLlmProtoServicePart.properties.fileData.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.fileData.description - schemas.CloudAiNlLlmProtoServicePart.properties.functionCall.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.functionCall.description - schemas.CloudAiNlLlmProtoServicePart.properties.functionResponse.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.functionResponse.description - schemas.CloudAiNlLlmProtoServicePart.properties.inlineData.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.inlineData.description - schemas.CloudAiNlLlmProtoServicePart.properties.lmRootMetadata.$ref - 
schemas.CloudAiNlLlmProtoServicePart.properties.lmRootMetadata.description - schemas.CloudAiNlLlmProtoServicePart.properties.text.description - schemas.CloudAiNlLlmProtoServicePart.properties.text.type - schemas.CloudAiNlLlmProtoServicePart.properties.videoMetadata.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.videoMetadata.description - schemas.CloudAiNlLlmProtoServicePart.type - schemas.CloudAiNlLlmProtoServicePartBlob.description - schemas.CloudAiNlLlmProtoServicePartBlob.id - schemas.CloudAiNlLlmProtoServicePartBlob.properties.data.description - schemas.CloudAiNlLlmProtoServicePartBlob.properties.data.format - schemas.CloudAiNlLlmProtoServicePartBlob.properties.data.type - schemas.CloudAiNlLlmProtoServicePartBlob.properties.mimeType.description - schemas.CloudAiNlLlmProtoServicePartBlob.properties.mimeType.type - schemas.CloudAiNlLlmProtoServicePartBlob.properties.originalFileData.$ref - schemas.CloudAiNlLlmProtoServicePartBlob.properties.originalFileData.description - schemas.CloudAiNlLlmProtoServicePartBlob.type - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.description - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.id - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.originalDocumentBlob.$ref - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.originalDocumentBlob.description - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.pageNumber.description - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.pageNumber.format - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.pageNumber.type - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.type - schemas.CloudAiNlLlmProtoServicePartFileData.description - schemas.CloudAiNlLlmProtoServicePartFileData.id - schemas.CloudAiNlLlmProtoServicePartFileData.properties.fileUri.description - schemas.CloudAiNlLlmProtoServicePartFileData.properties.fileUri.type - 
schemas.CloudAiNlLlmProtoServicePartFileData.properties.mimeType.description - schemas.CloudAiNlLlmProtoServicePartFileData.properties.mimeType.type - schemas.CloudAiNlLlmProtoServicePartFileData.type - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.description - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.id - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.properties.chunkId.description - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.properties.chunkId.type - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.type - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.description - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.id - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.endOffset.description - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.endOffset.format - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.endOffset.type - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.startOffset.description - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.startOffset.format - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.startOffset.type - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.id - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.enum - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.enumDescriptions - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReasonMessage.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReasonMessage.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.safetyRatings.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.safetyRatings.items.$ref - 
schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.safetyRatings.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.type - schemas.CloudAiNlLlmProtoServiceRaiResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.id - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.aidaRecitationResult.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.aidaRecitationResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.blocked.deprecated - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.blocked.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.blocked.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.items.format - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.items.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.filtered.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.filtered.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.languageFilterResult.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.languageFilterResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.mmRecitationResult.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.mmRecitationResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.raiSignals.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.raiSignals.items.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.raiSignals.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.translationRequestInfos.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.translationRequestInfos.items.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.translationRequestInfos.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredBlocklist.description - 
schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredBlocklist.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredRecitation.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredRecitation.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredSafetyFilter.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredSafetyFilter.type - schemas.CloudAiNlLlmProtoServiceRaiResult.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.id - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.enum - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.enumDescriptions - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.flagged.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.flagged.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.influentialTerms.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.influentialTerms.items.$ref - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.influentialTerms.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.enum - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.enumDescriptions - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.score.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.score.format - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.score.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.id - 
schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.beginOffset.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.beginOffset.format - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.beginOffset.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.confidence.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.confidence.format - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.confidence.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.enum - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.enumDescriptions - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.term.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.term.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.id - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.blocked.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.blocked.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.enum - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.influentialTerms.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.influentialTerms.items.$ref - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.influentialTerms.type - 
schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.enum - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probabilityScore.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probabilityScore.format - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probabilityScore.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.enum - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severityScore.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severityScore.format - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severityScore.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.id - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.beginOffset.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.beginOffset.format - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.beginOffset.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.confidence.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.confidence.format - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.confidence.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.description - 
schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.enum - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.term.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.term.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.description - schemas.CloudAiNlLlmProtoServiceUsageMetadata.id - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.candidatesTokenCount.description - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.candidatesTokenCount.format - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.candidatesTokenCount.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.promptTokenCount.description - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.promptTokenCount.format - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.promptTokenCount.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.totalTokenCount.format - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.totalTokenCount.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.id - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.argentumMetricId.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.argentumMetricId.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.doubleValue.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.doubleValue.format - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.doubleValue.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.int64Value.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.int64Value.format - 
schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.int64Value.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.metricName.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.metricName.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.systemLabels.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.systemLabels.items.$ref - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.systemLabels.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.type - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.id - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelName.description - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelName.type - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelValue.description - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelValue.type - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.type - schemas.IntelligenceCloudAutomlXpsReportingMetrics.id - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.deprecated - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.description - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.format - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.type - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.metricEntries.description - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.metricEntries.items.$ref - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.metricEntries.type - schemas.IntelligenceCloudAutomlXpsReportingMetrics.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.id - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.amarnaId.type - 
schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.arxivId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.author.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bibkey.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.biorxivId.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.biorxivId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookTitle.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookVolumeId.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookVolumeId.format - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookVolumeId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.category.enum - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.category.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.category.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.conversationId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.enum - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.filepath.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.geminiId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.gnewsArticleTitle.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.goodallExampleId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.isOptOut.description - 
schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.isOptOut.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.isPrompt.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.lamdaExampleId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.license.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.meenaConversationId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.naturalLanguageCode.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.naturalLanguageCode.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.noAttribution.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.noAttribution.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.podcastUtteranceId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.publicationDate.$ref - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.qualityScoreExperimentOnly.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.qualityScoreExperimentOnly.format - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.qualityScoreExperimentOnly.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.repo.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.repo.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.url.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.url.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.volumeId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.wikipediaArticleTitle.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.wikipediaArticleTitle.type - 
schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.youtubeVideoId.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.youtubeVideoId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.description - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.id - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.dynamicSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.dynamicSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.description - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.enum - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.trainingSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.trainingSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.id - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.enum - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.displayAttributionMessage.description - 
schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.displayAttributionMessage.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docAttribution.$ref - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docOccurrences.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docOccurrences.format - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docOccurrences.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.endIndex.format - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.endIndex.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.rawText.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.rawText.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.segmentRecitationAction.enum - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.segmentRecitationAction.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.segmentRecitationAction.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.enum - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.startIndex.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.startIndex.format - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.startIndex.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.description - 
schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.id - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.dynamicSegmentResults.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.dynamicSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.dynamicSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.fullyCheckedTextIndex.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.fullyCheckedTextIndex.format - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.fullyCheckedTextIndex.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.enum - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.trainingSegmentResults.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.trainingSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.trainingSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.type - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.description - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.id - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.properties.imageResult.$ref - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.properties.textResult.$ref - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.type - 
schemas.LearningGenaiRecitationDocAttribution.description - schemas.LearningGenaiRecitationDocAttribution.id - schemas.LearningGenaiRecitationDocAttribution.properties.amarnaId.type - schemas.LearningGenaiRecitationDocAttribution.properties.arxivId.type - schemas.LearningGenaiRecitationDocAttribution.properties.author.type - schemas.LearningGenaiRecitationDocAttribution.properties.bibkey.type - schemas.LearningGenaiRecitationDocAttribution.properties.biorxivId.description - schemas.LearningGenaiRecitationDocAttribution.properties.biorxivId.type - schemas.LearningGenaiRecitationDocAttribution.properties.bookTitle.type - schemas.LearningGenaiRecitationDocAttribution.properties.bookVolumeId.description - schemas.LearningGenaiRecitationDocAttribution.properties.bookVolumeId.format - schemas.LearningGenaiRecitationDocAttribution.properties.bookVolumeId.type - schemas.LearningGenaiRecitationDocAttribution.properties.conversationId.type - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.description - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.enum - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.enumDescriptions - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.type - schemas.LearningGenaiRecitationDocAttribution.properties.filepath.type - schemas.LearningGenaiRecitationDocAttribution.properties.geminiId.type - schemas.LearningGenaiRecitationDocAttribution.properties.gnewsArticleTitle.type - schemas.LearningGenaiRecitationDocAttribution.properties.goodallExampleId.type - schemas.LearningGenaiRecitationDocAttribution.properties.isOptOut.description - schemas.LearningGenaiRecitationDocAttribution.properties.isOptOut.type - schemas.LearningGenaiRecitationDocAttribution.properties.isPrompt.description - schemas.LearningGenaiRecitationDocAttribution.properties.isPrompt.type - schemas.LearningGenaiRecitationDocAttribution.properties.lamdaExampleId.type - 
schemas.LearningGenaiRecitationDocAttribution.properties.license.type - schemas.LearningGenaiRecitationDocAttribution.properties.meenaConversationId.type - schemas.LearningGenaiRecitationDocAttribution.properties.naturalLanguageCode.description - schemas.LearningGenaiRecitationDocAttribution.properties.naturalLanguageCode.type - schemas.LearningGenaiRecitationDocAttribution.properties.noAttribution.description - schemas.LearningGenaiRecitationDocAttribution.properties.noAttribution.type - schemas.LearningGenaiRecitationDocAttribution.properties.podcastUtteranceId.type - schemas.LearningGenaiRecitationDocAttribution.properties.publicationDate.$ref - schemas.LearningGenaiRecitationDocAttribution.properties.qualityScoreExperimentOnly.description - schemas.LearningGenaiRecitationDocAttribution.properties.qualityScoreExperimentOnly.format - schemas.LearningGenaiRecitationDocAttribution.properties.qualityScoreExperimentOnly.type - schemas.LearningGenaiRecitationDocAttribution.properties.repo.description - schemas.LearningGenaiRecitationDocAttribution.properties.repo.type - schemas.LearningGenaiRecitationDocAttribution.properties.url.description - schemas.LearningGenaiRecitationDocAttribution.properties.url.type - schemas.LearningGenaiRecitationDocAttribution.properties.volumeId.type - schemas.LearningGenaiRecitationDocAttribution.properties.wikipediaArticleTitle.description - schemas.LearningGenaiRecitationDocAttribution.properties.wikipediaArticleTitle.type - schemas.LearningGenaiRecitationDocAttribution.properties.youtubeVideoId.type - schemas.LearningGenaiRecitationDocAttribution.type - schemas.LearningGenaiRecitationImageDocAttribution.description - schemas.LearningGenaiRecitationImageDocAttribution.id - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.description - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.enum - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.enumDescriptions - 
schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.type - schemas.LearningGenaiRecitationImageDocAttribution.properties.stringDocids.description - schemas.LearningGenaiRecitationImageDocAttribution.properties.stringDocids.type - schemas.LearningGenaiRecitationImageDocAttribution.type - schemas.LearningGenaiRecitationImageRecitationCheckResult.id - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.description - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.enum - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.enumDescriptions - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.type - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitedImages.description - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitedImages.items.$ref - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitedImages.type - schemas.LearningGenaiRecitationImageRecitationCheckResult.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.id - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.docAttribution.$ref - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.docAttribution.description - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.description - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.enum - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.enumDescriptions - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.imageId.description - 
schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.imageId.format - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.imageId.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.scores.description - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.scores.format - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.scores.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.type - schemas.LearningGenaiRecitationMMRecitationCheckResult.description - schemas.LearningGenaiRecitationMMRecitationCheckResult.id - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.chunkResults.items.$ref - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.chunkResults.type - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.description - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.enum - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.enumDescriptions - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.type - schemas.LearningGenaiRecitationMMRecitationCheckResult.type - schemas.LearningGenaiRecitationRecitationResult.description - schemas.LearningGenaiRecitationRecitationResult.id - schemas.LearningGenaiRecitationRecitationResult.properties.dynamicSegmentResults.items.$ref - schemas.LearningGenaiRecitationRecitationResult.properties.dynamicSegmentResults.type - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.description - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.enum - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.enumDescriptions - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.type - 
schemas.LearningGenaiRecitationRecitationResult.properties.trainingSegmentResults.items.$ref - schemas.LearningGenaiRecitationRecitationResult.properties.trainingSegmentResults.type - schemas.LearningGenaiRecitationRecitationResult.type - schemas.LearningGenaiRecitationSegmentResult.description - schemas.LearningGenaiRecitationSegmentResult.id - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.description - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.enum - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.enumDescriptions - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.type - schemas.LearningGenaiRecitationSegmentResult.properties.displayAttributionMessage.description - schemas.LearningGenaiRecitationSegmentResult.properties.displayAttributionMessage.type - schemas.LearningGenaiRecitationSegmentResult.properties.docAttribution.$ref - schemas.LearningGenaiRecitationSegmentResult.properties.docOccurrences.description - schemas.LearningGenaiRecitationSegmentResult.properties.docOccurrences.format - schemas.LearningGenaiRecitationSegmentResult.properties.docOccurrences.type - schemas.LearningGenaiRecitationSegmentResult.properties.endIndex.format - schemas.LearningGenaiRecitationSegmentResult.properties.endIndex.type - schemas.LearningGenaiRecitationSegmentResult.properties.rawText.description - schemas.LearningGenaiRecitationSegmentResult.properties.rawText.type - schemas.LearningGenaiRecitationSegmentResult.properties.segmentRecitationAction.enum - schemas.LearningGenaiRecitationSegmentResult.properties.segmentRecitationAction.enumDescriptions - schemas.LearningGenaiRecitationSegmentResult.properties.segmentRecitationAction.type - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.description - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.enum - 
schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.enumDescriptions - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.type - schemas.LearningGenaiRecitationSegmentResult.properties.startIndex.description - schemas.LearningGenaiRecitationSegmentResult.properties.startIndex.format - schemas.LearningGenaiRecitationSegmentResult.properties.startIndex.type - schemas.LearningGenaiRecitationSegmentResult.type - schemas.LearningGenaiRootCalculationType.description - schemas.LearningGenaiRootCalculationType.id - schemas.LearningGenaiRootCalculationType.properties.scoreType.enum - schemas.LearningGenaiRootCalculationType.properties.scoreType.enumDescriptions - schemas.LearningGenaiRootCalculationType.properties.scoreType.type - schemas.LearningGenaiRootCalculationType.properties.weights.format - schemas.LearningGenaiRootCalculationType.properties.weights.type - schemas.LearningGenaiRootCalculationType.type - schemas.LearningGenaiRootClassifierOutput.id - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutput.$ref - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutput.deprecated - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutput.description - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutputs.description - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutputs.items.$ref - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutputs.type - schemas.LearningGenaiRootClassifierOutput.properties.state.$ref - schemas.LearningGenaiRootClassifierOutput.properties.state.description - schemas.LearningGenaiRootClassifierOutput.type - schemas.LearningGenaiRootClassifierOutputSummary.id - schemas.LearningGenaiRootClassifierOutputSummary.properties.metrics.items.$ref - schemas.LearningGenaiRootClassifierOutputSummary.properties.metrics.type - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutput.$ref - 
schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutput.deprecated - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutput.description - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutputs.description - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutputs.items.$ref - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutputs.type - schemas.LearningGenaiRootClassifierOutputSummary.type - schemas.LearningGenaiRootClassifierState.description - schemas.LearningGenaiRootClassifierState.id - schemas.LearningGenaiRootClassifierState.properties.dataProviderOutput.items.$ref - schemas.LearningGenaiRootClassifierState.properties.dataProviderOutput.type - schemas.LearningGenaiRootClassifierState.properties.metricOutput.items.$ref - schemas.LearningGenaiRootClassifierState.properties.metricOutput.type - schemas.LearningGenaiRootClassifierState.type - schemas.LearningGenaiRootCodeyChatMetadata.description - schemas.LearningGenaiRootCodeyChatMetadata.id - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.description - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.enum - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.enumDescriptions - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.type - schemas.LearningGenaiRootCodeyChatMetadata.type - schemas.LearningGenaiRootCodeyCheckpoint.description - schemas.LearningGenaiRootCodeyCheckpoint.id - schemas.LearningGenaiRootCodeyCheckpoint.properties.codeyTruncatorMetadata.$ref - schemas.LearningGenaiRootCodeyCheckpoint.properties.codeyTruncatorMetadata.description - schemas.LearningGenaiRootCodeyCheckpoint.properties.currentSample.description - schemas.LearningGenaiRootCodeyCheckpoint.properties.currentSample.type - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.description - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.enum - 
schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.enumDeprecated - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.enumDescriptions - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.type - schemas.LearningGenaiRootCodeyCheckpoint.type - schemas.LearningGenaiRootCodeyCompletionMetadata.description - schemas.LearningGenaiRootCodeyCompletionMetadata.id - schemas.LearningGenaiRootCodeyCompletionMetadata.properties.checkpoints.items.$ref - schemas.LearningGenaiRootCodeyCompletionMetadata.properties.checkpoints.type - schemas.LearningGenaiRootCodeyCompletionMetadata.type - schemas.LearningGenaiRootCodeyGenerationMetadata.description - schemas.LearningGenaiRootCodeyGenerationMetadata.id - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.output.description - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.output.type - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.description - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.enum - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.enumDeprecated - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.enumDescriptions - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.type - schemas.LearningGenaiRootCodeyGenerationMetadata.type - schemas.LearningGenaiRootCodeyOutput.description - schemas.LearningGenaiRootCodeyOutput.id - schemas.LearningGenaiRootCodeyOutput.properties.codeyChatMetadata.$ref - schemas.LearningGenaiRootCodeyOutput.properties.codeyCompletionMetadata.$ref - schemas.LearningGenaiRootCodeyOutput.properties.codeyGenerationMetadata.$ref - schemas.LearningGenaiRootCodeyOutput.type - schemas.LearningGenaiRootCodeyTruncatorMetadata.description - schemas.LearningGenaiRootCodeyTruncatorMetadata.id - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.cutoffIndex.description - 
schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.cutoffIndex.format - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.cutoffIndex.type - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.truncatedText.description - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.truncatedText.type - schemas.LearningGenaiRootCodeyTruncatorMetadata.type - schemas.LearningGenaiRootControlDecodingConfigThreshold.description - schemas.LearningGenaiRootControlDecodingConfigThreshold.id - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.policy.enum - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.policy.enumDescriptions - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.policy.type - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.scoreMax.format - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.scoreMax.type - schemas.LearningGenaiRootControlDecodingConfigThreshold.type - schemas.LearningGenaiRootControlDecodingRecord.description - schemas.LearningGenaiRootControlDecodingRecord.id - schemas.LearningGenaiRootControlDecodingRecord.properties.prefixes.description - schemas.LearningGenaiRootControlDecodingRecord.properties.prefixes.type - schemas.LearningGenaiRootControlDecodingRecord.properties.scores.description - schemas.LearningGenaiRootControlDecodingRecord.properties.scores.items.$ref - schemas.LearningGenaiRootControlDecodingRecord.properties.scores.type - schemas.LearningGenaiRootControlDecodingRecord.properties.suffiexes.description - schemas.LearningGenaiRootControlDecodingRecord.properties.suffiexes.type - schemas.LearningGenaiRootControlDecodingRecord.properties.thresholds.description - schemas.LearningGenaiRootControlDecodingRecord.properties.thresholds.items.$ref - schemas.LearningGenaiRootControlDecodingRecord.properties.thresholds.type - schemas.LearningGenaiRootControlDecodingRecord.type - 
schemas.LearningGenaiRootControlDecodingRecordPolicyScore.id - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.policy.enum - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.policy.enumDescriptions - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.policy.type - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.score.format - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.score.type - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.type - schemas.LearningGenaiRootControlDecodingRecords.id - schemas.LearningGenaiRootControlDecodingRecords.properties.records.description - schemas.LearningGenaiRootControlDecodingRecords.properties.records.items.$ref - schemas.LearningGenaiRootControlDecodingRecords.properties.records.type - schemas.LearningGenaiRootControlDecodingRecords.type - schemas.LearningGenaiRootDataProviderOutput.id - schemas.LearningGenaiRootDataProviderOutput.properties.name.type - schemas.LearningGenaiRootDataProviderOutput.properties.status.$ref - schemas.LearningGenaiRootDataProviderOutput.properties.status.description - schemas.LearningGenaiRootDataProviderOutput.type - schemas.LearningGenaiRootFilterMetadata.id - schemas.LearningGenaiRootFilterMetadata.properties.confidence.description - schemas.LearningGenaiRootFilterMetadata.properties.confidence.enum - schemas.LearningGenaiRootFilterMetadata.properties.confidence.enumDescriptions - schemas.LearningGenaiRootFilterMetadata.properties.confidence.type - schemas.LearningGenaiRootFilterMetadata.properties.debugInfo.$ref - schemas.LearningGenaiRootFilterMetadata.properties.debugInfo.description - schemas.LearningGenaiRootFilterMetadata.properties.fallback.description - schemas.LearningGenaiRootFilterMetadata.properties.fallback.type - schemas.LearningGenaiRootFilterMetadata.properties.info.description - schemas.LearningGenaiRootFilterMetadata.properties.info.type - 
schemas.LearningGenaiRootFilterMetadata.properties.name.description - schemas.LearningGenaiRootFilterMetadata.properties.name.type - schemas.LearningGenaiRootFilterMetadata.properties.reason.description - schemas.LearningGenaiRootFilterMetadata.properties.reason.enum - schemas.LearningGenaiRootFilterMetadata.properties.reason.enumDescriptions - schemas.LearningGenaiRootFilterMetadata.properties.reason.type - schemas.LearningGenaiRootFilterMetadata.properties.text.description - schemas.LearningGenaiRootFilterMetadata.properties.text.type - schemas.LearningGenaiRootFilterMetadata.type - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.id - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.classifierOutput.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.defaultMetadata.type - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.languageFilterResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiOutput.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiOutput.description - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiSignal.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiSignal.deprecated - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.records.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.records.description - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.streamRecitationResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.streamRecitationResult.deprecated - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.takedownResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.toxicityResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.type - 
schemas.LearningGenaiRootGroundingMetadata.id - schemas.LearningGenaiRootGroundingMetadata.properties.citations.items.$ref - schemas.LearningGenaiRootGroundingMetadata.properties.citations.type - schemas.LearningGenaiRootGroundingMetadata.properties.groundingCancelled.description - schemas.LearningGenaiRootGroundingMetadata.properties.groundingCancelled.type - schemas.LearningGenaiRootGroundingMetadata.properties.searchQueries.items.type - schemas.LearningGenaiRootGroundingMetadata.properties.searchQueries.type - schemas.LearningGenaiRootGroundingMetadata.type - schemas.LearningGenaiRootGroundingMetadataCitation.id - schemas.LearningGenaiRootGroundingMetadataCitation.properties.endIndex.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.endIndex.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.endIndex.type - schemas.LearningGenaiRootGroundingMetadataCitation.properties.factIndex.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.factIndex.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.factIndex.type - schemas.LearningGenaiRootGroundingMetadataCitation.properties.score.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.score.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.score.type - schemas.LearningGenaiRootGroundingMetadataCitation.properties.startIndex.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.startIndex.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.startIndex.type - schemas.LearningGenaiRootGroundingMetadataCitation.type - schemas.LearningGenaiRootHarm.id - schemas.LearningGenaiRootHarm.properties.contextualDangerous.description - schemas.LearningGenaiRootHarm.properties.contextualDangerous.type - schemas.LearningGenaiRootHarm.properties.csam.type - schemas.LearningGenaiRootHarm.properties.fringe.type - 
schemas.LearningGenaiRootHarm.properties.grailImageHarmType.$ref - schemas.LearningGenaiRootHarm.properties.grailTextHarmType.$ref - schemas.LearningGenaiRootHarm.properties.imageChild.type - schemas.LearningGenaiRootHarm.properties.imageCsam.type - schemas.LearningGenaiRootHarm.properties.imagePedo.type - schemas.LearningGenaiRootHarm.properties.imagePorn.description - schemas.LearningGenaiRootHarm.properties.imagePorn.type - schemas.LearningGenaiRootHarm.properties.imageViolence.type - schemas.LearningGenaiRootHarm.properties.pqc.type - schemas.LearningGenaiRootHarm.properties.safetycat.$ref - schemas.LearningGenaiRootHarm.properties.spii.$ref - schemas.LearningGenaiRootHarm.properties.spii.description - schemas.LearningGenaiRootHarm.properties.threshold.format - schemas.LearningGenaiRootHarm.properties.threshold.type - schemas.LearningGenaiRootHarm.properties.videoFrameChild.type - schemas.LearningGenaiRootHarm.properties.videoFrameCsam.type - schemas.LearningGenaiRootHarm.properties.videoFramePedo.type - schemas.LearningGenaiRootHarm.properties.videoFramePorn.description - schemas.LearningGenaiRootHarm.properties.videoFramePorn.type - schemas.LearningGenaiRootHarm.properties.videoFrameViolence.type - schemas.LearningGenaiRootHarm.type - schemas.LearningGenaiRootHarmGrailImageHarmType.description - schemas.LearningGenaiRootHarmGrailImageHarmType.id - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.items.enum - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.items.enumDescriptions - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.items.type - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.type - schemas.LearningGenaiRootHarmGrailImageHarmType.type - schemas.LearningGenaiRootHarmGrailTextHarmType.description - schemas.LearningGenaiRootHarmGrailTextHarmType.id - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.items.enum - 
schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.items.enumDescriptions - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.items.type - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.type - schemas.LearningGenaiRootHarmGrailTextHarmType.type - schemas.LearningGenaiRootHarmSafetyCatCategories.description - schemas.LearningGenaiRootHarmSafetyCatCategories.id - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.items.enum - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.items.enumDescriptions - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.items.type - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.type - schemas.LearningGenaiRootHarmSafetyCatCategories.type - schemas.LearningGenaiRootHarmSpiiFilter.description - schemas.LearningGenaiRootHarmSpiiFilter.id - schemas.LearningGenaiRootHarmSpiiFilter.properties.usBankRoutingMicr.type - schemas.LearningGenaiRootHarmSpiiFilter.properties.usEmployerIdentificationNumber.type - schemas.LearningGenaiRootHarmSpiiFilter.properties.usSocialSecurityNumber.type - schemas.LearningGenaiRootHarmSpiiFilter.type - schemas.LearningGenaiRootInternalMetadata.id - schemas.LearningGenaiRootInternalMetadata.properties.scoredTokens.items.$ref - schemas.LearningGenaiRootInternalMetadata.properties.scoredTokens.type - schemas.LearningGenaiRootInternalMetadata.type - schemas.LearningGenaiRootLanguageFilterResult.id - schemas.LearningGenaiRootLanguageFilterResult.properties.allowed.description - schemas.LearningGenaiRootLanguageFilterResult.properties.allowed.type - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguage.description - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguage.type - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguageProbability.description - 
schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguageProbability.format - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguageProbability.type - schemas.LearningGenaiRootLanguageFilterResult.type - schemas.LearningGenaiRootMetricOutput.id - schemas.LearningGenaiRootMetricOutput.properties.debug.type - schemas.LearningGenaiRootMetricOutput.properties.name.description - schemas.LearningGenaiRootMetricOutput.properties.name.type - schemas.LearningGenaiRootMetricOutput.properties.numericValue.format - schemas.LearningGenaiRootMetricOutput.properties.numericValue.type - schemas.LearningGenaiRootMetricOutput.properties.status.$ref - schemas.LearningGenaiRootMetricOutput.properties.stringValue.type - schemas.LearningGenaiRootMetricOutput.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.id - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.additionalProperties.format - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.additionalProperties.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.description - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.prompt2queryMilliseconds.description - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.prompt2queryMilliseconds.format - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.prompt2queryMilliseconds.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.retrievalAugmentMilliseconds.description - 
schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.retrievalAugmentMilliseconds.format - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.retrievalAugmentMilliseconds.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.type - schemas.LearningGenaiRootRAIOutput.description - schemas.LearningGenaiRootRAIOutput.id - schemas.LearningGenaiRootRAIOutput.properties.allowed.type - schemas.LearningGenaiRootRAIOutput.properties.harm.$ref - schemas.LearningGenaiRootRAIOutput.properties.name.type - schemas.LearningGenaiRootRAIOutput.properties.score.format - schemas.LearningGenaiRootRAIOutput.properties.score.type - schemas.LearningGenaiRootRAIOutput.type - schemas.LearningGenaiRootRegexTakedownResult.id - schemas.LearningGenaiRootRegexTakedownResult.properties.allowed.description - schemas.LearningGenaiRootRegexTakedownResult.properties.allowed.type - schemas.LearningGenaiRootRegexTakedownResult.properties.takedownRegex.description - schemas.LearningGenaiRootRegexTakedownResult.properties.takedownRegex.type - schemas.LearningGenaiRootRegexTakedownResult.type - schemas.LearningGenaiRootRequestMetrics.id - schemas.LearningGenaiRootRequestMetrics.properties.audioMetrics.$ref - schemas.LearningGenaiRootRequestMetrics.properties.audioMetrics.description - schemas.LearningGenaiRootRequestMetrics.properties.imageMetrics.$ref - schemas.LearningGenaiRootRequestMetrics.properties.imageMetrics.description - schemas.LearningGenaiRootRequestMetrics.properties.textTokenCount.description - schemas.LearningGenaiRootRequestMetrics.properties.textTokenCount.format - schemas.LearningGenaiRootRequestMetrics.properties.textTokenCount.type - schemas.LearningGenaiRootRequestMetrics.properties.totalTokenCount.description - schemas.LearningGenaiRootRequestMetrics.properties.totalTokenCount.format - schemas.LearningGenaiRootRequestMetrics.properties.totalTokenCount.type - 
schemas.LearningGenaiRootRequestMetrics.properties.videoMetrics.$ref - schemas.LearningGenaiRootRequestMetrics.properties.videoMetrics.description - schemas.LearningGenaiRootRequestMetrics.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.id - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioDuration.description - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioDuration.format - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioDuration.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioTokenCount.description - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioTokenCount.format - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioTokenCount.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.numAudioFrames.description - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.numAudioFrames.format - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.numAudioFrames.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.type - schemas.LearningGenaiRootRequestMetricsImageMetrics.id - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.imageTokenCount.description - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.imageTokenCount.format - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.imageTokenCount.type - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.numImages.description - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.numImages.format - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.numImages.type - schemas.LearningGenaiRootRequestMetricsImageMetrics.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.id - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.audioSample.$ref - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.audioSample.description - 
schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.numVideoFrames.description - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.numVideoFrames.format - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.numVideoFrames.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoDuration.description - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoDuration.format - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoDuration.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoFramesTokenCount.description - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoFramesTokenCount.format - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoFramesTokenCount.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.type - schemas.LearningGenaiRootRequestResponseTakedownResult.id - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.allowed.description - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.allowed.type - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.requestTakedownRegex.description - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.requestTakedownRegex.type - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.responseTakedownRegex.description - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.responseTakedownRegex.type - schemas.LearningGenaiRootRequestResponseTakedownResult.type - schemas.LearningGenaiRootRoutingDecision.description - schemas.LearningGenaiRootRoutingDecision.id - schemas.LearningGenaiRootRoutingDecision.properties.metadata.$ref - schemas.LearningGenaiRootRoutingDecision.properties.modelConfigId.description - schemas.LearningGenaiRootRoutingDecision.properties.modelConfigId.type - schemas.LearningGenaiRootRoutingDecision.type - schemas.LearningGenaiRootRoutingDecisionMetadata.description - 
schemas.LearningGenaiRootRoutingDecisionMetadata.id - schemas.LearningGenaiRootRoutingDecisionMetadata.properties.scoreBasedRoutingMetadata.$ref - schemas.LearningGenaiRootRoutingDecisionMetadata.properties.tokenLengthBasedRoutingMetadata.$ref - schemas.LearningGenaiRootRoutingDecisionMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.id - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.matchedRule.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.matchedRule.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.score.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.score.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.usedDefaultFallback.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.usedDefaultFallback.type - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.id - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelInputTokenMetadata.items.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelInputTokenMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelMaxTokenMetadata.items.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelMaxTokenMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.id - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.computedInputTokenLength.description - 
schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.computedInputTokenLength.format - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.computedInputTokenLength.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.modelId.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.pickedAsFallback.description - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.pickedAsFallback.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.selected.description - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.selected.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.id - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumInputTokens.format - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumInputTokens.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumOutputTokens.format - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumOutputTokens.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.modelId.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.type - schemas.LearningGenaiRootRuleOutput.id - schemas.LearningGenaiRootRuleOutput.properties.decision.enum - schemas.LearningGenaiRootRuleOutput.properties.decision.enumDescriptions - schemas.LearningGenaiRootRuleOutput.properties.decision.type - 
schemas.LearningGenaiRootRuleOutput.properties.name.type - schemas.LearningGenaiRootRuleOutput.type - schemas.LearningGenaiRootScore.id - schemas.LearningGenaiRootScore.properties.calculationType.$ref - schemas.LearningGenaiRootScore.properties.internalMetadata.$ref - schemas.LearningGenaiRootScore.properties.internalMetadata.description - schemas.LearningGenaiRootScore.properties.thresholdType.$ref - schemas.LearningGenaiRootScore.properties.tokensAndLogprobPerDecodingStep.$ref - schemas.LearningGenaiRootScore.properties.tokensAndLogprobPerDecodingStep.description - schemas.LearningGenaiRootScore.properties.value.format - schemas.LearningGenaiRootScore.properties.value.type - schemas.LearningGenaiRootScore.type - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.id - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.equalOrGreaterThan.$ref - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.equalOrGreaterThan.description - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.lessThan.$ref - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.modelConfigId.description - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.modelConfigId.type - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.type - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.description - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.id - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.properties.phrase.$ref - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.properties.similarityScore.format - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.properties.similarityScore.type - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.type - schemas.LearningGenaiRootScoredToken.description - schemas.LearningGenaiRootScoredToken.id - schemas.LearningGenaiRootScoredToken.properties.endTokenScore.description - schemas.LearningGenaiRootScoredToken.properties.endTokenScore.format - 
schemas.LearningGenaiRootScoredToken.properties.endTokenScore.type - schemas.LearningGenaiRootScoredToken.properties.score.description - schemas.LearningGenaiRootScoredToken.properties.score.format - schemas.LearningGenaiRootScoredToken.properties.score.type - schemas.LearningGenaiRootScoredToken.properties.token.type - schemas.LearningGenaiRootScoredToken.type - schemas.LearningGenaiRootSimilarityTakedownPhrase.description - schemas.LearningGenaiRootSimilarityTakedownPhrase.id - schemas.LearningGenaiRootSimilarityTakedownPhrase.properties.blockedPhrase.type - schemas.LearningGenaiRootSimilarityTakedownPhrase.type - schemas.LearningGenaiRootSimilarityTakedownResult.id - schemas.LearningGenaiRootSimilarityTakedownResult.properties.allowed.description - schemas.LearningGenaiRootSimilarityTakedownResult.properties.allowed.type - schemas.LearningGenaiRootSimilarityTakedownResult.properties.scoredPhrases.description - schemas.LearningGenaiRootSimilarityTakedownResult.properties.scoredPhrases.items.$ref - schemas.LearningGenaiRootSimilarityTakedownResult.properties.scoredPhrases.type - schemas.LearningGenaiRootSimilarityTakedownResult.type - schemas.LearningGenaiRootTakedownResult.id - schemas.LearningGenaiRootTakedownResult.properties.allowed.description - schemas.LearningGenaiRootTakedownResult.properties.allowed.type - schemas.LearningGenaiRootTakedownResult.properties.regexTakedownResult.$ref - schemas.LearningGenaiRootTakedownResult.properties.requestResponseTakedownResult.$ref - schemas.LearningGenaiRootTakedownResult.properties.similarityTakedownResult.$ref - schemas.LearningGenaiRootTakedownResult.type - schemas.LearningGenaiRootThresholdType.description - schemas.LearningGenaiRootThresholdType.id - schemas.LearningGenaiRootThresholdType.properties.scoreType.enum - schemas.LearningGenaiRootThresholdType.properties.scoreType.enumDescriptions - schemas.LearningGenaiRootThresholdType.properties.scoreType.type - 
schemas.LearningGenaiRootThresholdType.properties.threshold.format - schemas.LearningGenaiRootThresholdType.properties.threshold.type - schemas.LearningGenaiRootThresholdType.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.id - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.chosenCandidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.chosenCandidates.items.$ref - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.chosenCandidates.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.topCandidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.topCandidates.items.$ref - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.topCandidates.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.id - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.logProbability.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.logProbability.format - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.logProbability.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.token.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.token.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.id - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.properties.candidates.description - 
schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.properties.candidates.items.$ref - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.properties.candidates.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.type - schemas.LearningGenaiRootToxicityResult.description - schemas.LearningGenaiRootToxicityResult.id - schemas.LearningGenaiRootToxicityResult.properties.signals.items.$ref - schemas.LearningGenaiRootToxicityResult.properties.signals.type - schemas.LearningGenaiRootToxicityResult.type - schemas.LearningGenaiRootToxicitySignal.description - schemas.LearningGenaiRootToxicitySignal.id - schemas.LearningGenaiRootToxicitySignal.properties.allowed.type - schemas.LearningGenaiRootToxicitySignal.properties.label.enum - schemas.LearningGenaiRootToxicitySignal.properties.label.enumDescriptions - schemas.LearningGenaiRootToxicitySignal.properties.label.type - schemas.LearningGenaiRootToxicitySignal.properties.score.format - schemas.LearningGenaiRootToxicitySignal.properties.score.type - schemas.LearningGenaiRootToxicitySignal.type - schemas.LearningGenaiRootTranslationRequestInfo.description - schemas.LearningGenaiRootTranslationRequestInfo.id - schemas.LearningGenaiRootTranslationRequestInfo.properties.detectedLanguageCodes.description - schemas.LearningGenaiRootTranslationRequestInfo.properties.detectedLanguageCodes.items.type - schemas.LearningGenaiRootTranslationRequestInfo.properties.detectedLanguageCodes.type - schemas.LearningGenaiRootTranslationRequestInfo.properties.totalContentSize.description - schemas.LearningGenaiRootTranslationRequestInfo.properties.totalContentSize.format - schemas.LearningGenaiRootTranslationRequestInfo.properties.totalContentSize.type - schemas.LearningGenaiRootTranslationRequestInfo.type - schemas.LearningServingLlmAtlasOutputMetadata.id - schemas.LearningServingLlmAtlasOutputMetadata.properties.requestTopic.type - 
schemas.LearningServingLlmAtlasOutputMetadata.properties.source.enum - schemas.LearningServingLlmAtlasOutputMetadata.properties.source.enumDescriptions - schemas.LearningServingLlmAtlasOutputMetadata.properties.source.type - schemas.LearningServingLlmAtlasOutputMetadata.type - schemas.LearningServingLlmMessageMetadata.description - schemas.LearningServingLlmMessageMetadata.id - schemas.LearningServingLlmMessageMetadata.properties.atlasMetadata.$ref - schemas.LearningServingLlmMessageMetadata.properties.classifierSummary.$ref - schemas.LearningServingLlmMessageMetadata.properties.classifierSummary.description - schemas.LearningServingLlmMessageMetadata.properties.codeyOutput.$ref - schemas.LearningServingLlmMessageMetadata.properties.codeyOutput.description - schemas.LearningServingLlmMessageMetadata.properties.currentStreamTextLength.format - schemas.LearningServingLlmMessageMetadata.properties.currentStreamTextLength.type - schemas.LearningServingLlmMessageMetadata.properties.deleted.description - schemas.LearningServingLlmMessageMetadata.properties.deleted.type - schemas.LearningServingLlmMessageMetadata.properties.filterMeta.description - schemas.LearningServingLlmMessageMetadata.properties.filterMeta.items.$ref - schemas.LearningServingLlmMessageMetadata.properties.filterMeta.type - schemas.LearningServingLlmMessageMetadata.properties.finalMessageScore.$ref - schemas.LearningServingLlmMessageMetadata.properties.finalMessageScore.description - schemas.LearningServingLlmMessageMetadata.properties.finishReason.description - schemas.LearningServingLlmMessageMetadata.properties.finishReason.enum - schemas.LearningServingLlmMessageMetadata.properties.finishReason.enumDescriptions - schemas.LearningServingLlmMessageMetadata.properties.finishReason.type - schemas.LearningServingLlmMessageMetadata.properties.groundingMetadata.$ref - schemas.LearningServingLlmMessageMetadata.properties.isCode.description - schemas.LearningServingLlmMessageMetadata.properties.isCode.type 
- schemas.LearningServingLlmMessageMetadata.properties.isFallback.description - schemas.LearningServingLlmMessageMetadata.properties.isFallback.type - schemas.LearningServingLlmMessageMetadata.properties.langidResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.langidResult.description - schemas.LearningServingLlmMessageMetadata.properties.language.description - schemas.LearningServingLlmMessageMetadata.properties.language.type - schemas.LearningServingLlmMessageMetadata.properties.lmPrefix.description - schemas.LearningServingLlmMessageMetadata.properties.lmPrefix.type - schemas.LearningServingLlmMessageMetadata.properties.lmrootInternalRequestMetrics.$ref - schemas.LearningServingLlmMessageMetadata.properties.lmrootInternalRequestMetrics.description - schemas.LearningServingLlmMessageMetadata.properties.mmRecitationResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.mmRecitationResult.description - schemas.LearningServingLlmMessageMetadata.properties.numRewinds.description - schemas.LearningServingLlmMessageMetadata.properties.numRewinds.format - schemas.LearningServingLlmMessageMetadata.properties.numRewinds.type - schemas.LearningServingLlmMessageMetadata.properties.originalText.description - schemas.LearningServingLlmMessageMetadata.properties.originalText.type - schemas.LearningServingLlmMessageMetadata.properties.perStreamDecodedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.perStreamDecodedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.perStreamDecodedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.perStreamReturnedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.perStreamReturnedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.perStreamReturnedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.raiOutputs.description - 
schemas.LearningServingLlmMessageMetadata.properties.raiOutputs.items.$ref - schemas.LearningServingLlmMessageMetadata.properties.raiOutputs.type - schemas.LearningServingLlmMessageMetadata.properties.recitationResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.recitationResult.description - schemas.LearningServingLlmMessageMetadata.properties.scores.description - schemas.LearningServingLlmMessageMetadata.properties.scores.items.$ref - schemas.LearningServingLlmMessageMetadata.properties.scores.type - schemas.LearningServingLlmMessageMetadata.properties.streamTerminated.description - schemas.LearningServingLlmMessageMetadata.properties.streamTerminated.type - schemas.LearningServingLlmMessageMetadata.properties.totalDecodedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.totalDecodedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.totalDecodedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.totalReturnedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.totalReturnedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.totalReturnedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.translatedUserPrompts.description - schemas.LearningServingLlmMessageMetadata.properties.translatedUserPrompts.items.type - schemas.LearningServingLlmMessageMetadata.properties.translatedUserPrompts.type - schemas.LearningServingLlmMessageMetadata.properties.vertexRaiResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.vertexRaiResult.description - schemas.LearningServingLlmMessageMetadata.type - schemas.NlpSaftLangIdLocalesResult.id - schemas.NlpSaftLangIdLocalesResult.properties.predictions.description - schemas.NlpSaftLangIdLocalesResult.properties.predictions.items.$ref - schemas.NlpSaftLangIdLocalesResult.properties.predictions.type - schemas.NlpSaftLangIdLocalesResult.type - 
schemas.NlpSaftLangIdLocalesResultLocale.id - schemas.NlpSaftLangIdLocalesResultLocale.properties.languageCode.description - schemas.NlpSaftLangIdLocalesResultLocale.properties.languageCode.type - schemas.NlpSaftLangIdLocalesResultLocale.type - schemas.NlpSaftLangIdResult.id - schemas.NlpSaftLangIdResult.properties.modelVersion.description - schemas.NlpSaftLangIdResult.properties.modelVersion.enum - schemas.NlpSaftLangIdResult.properties.modelVersion.enumDescriptions - schemas.NlpSaftLangIdResult.properties.modelVersion.type - schemas.NlpSaftLangIdResult.properties.predictions.description - schemas.NlpSaftLangIdResult.properties.predictions.items.$ref - schemas.NlpSaftLangIdResult.properties.predictions.type - schemas.NlpSaftLangIdResult.properties.spanPredictions.description - schemas.NlpSaftLangIdResult.properties.spanPredictions.items.$ref - schemas.NlpSaftLangIdResult.properties.spanPredictions.type - schemas.NlpSaftLangIdResult.type - schemas.NlpSaftLanguageSpan.id - schemas.NlpSaftLanguageSpan.properties.end.format - schemas.NlpSaftLanguageSpan.properties.end.type - schemas.NlpSaftLanguageSpan.properties.languageCode.description - schemas.NlpSaftLanguageSpan.properties.languageCode.type - schemas.NlpSaftLanguageSpan.properties.locales.$ref - schemas.NlpSaftLanguageSpan.properties.locales.description - schemas.NlpSaftLanguageSpan.properties.probability.description - schemas.NlpSaftLanguageSpan.properties.probability.format - schemas.NlpSaftLanguageSpan.properties.probability.type - schemas.NlpSaftLanguageSpan.properties.start.description - schemas.NlpSaftLanguageSpan.properties.start.format - schemas.NlpSaftLanguageSpan.properties.start.type - schemas.NlpSaftLanguageSpan.type - schemas.NlpSaftLanguageSpanSequence.id - schemas.NlpSaftLanguageSpanSequence.properties.languageSpans.description - schemas.NlpSaftLanguageSpanSequence.properties.languageSpans.items.$ref - schemas.NlpSaftLanguageSpanSequence.properties.languageSpans.type - 
schemas.NlpSaftLanguageSpanSequence.properties.probability.description - schemas.NlpSaftLanguageSpanSequence.properties.probability.format - schemas.NlpSaftLanguageSpanSequence.properties.probability.type - schemas.NlpSaftLanguageSpanSequence.type - schemas.Proto2BridgeMessageSet.description - schemas.Proto2BridgeMessageSet.id - schemas.Proto2BridgeMessageSet.type - schemas.UtilStatusProto.description - schemas.UtilStatusProto.id - schemas.UtilStatusProto.properties.canonicalCode.description - schemas.UtilStatusProto.properties.canonicalCode.format - schemas.UtilStatusProto.properties.canonicalCode.type - schemas.UtilStatusProto.properties.code.description - schemas.UtilStatusProto.properties.code.format - schemas.UtilStatusProto.properties.code.type - schemas.UtilStatusProto.properties.message.description - schemas.UtilStatusProto.properties.message.type - schemas.UtilStatusProto.properties.messageSet.$ref - schemas.UtilStatusProto.properties.messageSet.description - schemas.UtilStatusProto.properties.space.description - schemas.UtilStatusProto.properties.space.type - schemas.UtilStatusProto.type The following keys were added: - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQuery.properties.rrf.$ref - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQuery.properties.rrf.description - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF.description - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF.id - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF.properties.alpha.description - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF.properties.alpha.format - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF.properties.alpha.type - schemas.GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF.type - schemas.GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor.properties.sparseDistance.description - 
schemas.GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor.properties.sparseDistance.format - schemas.GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor.properties.sparseDistance.type - schemas.GoogleCloudAiplatformV1beta1IndexDatapoint.properties.sparseEmbedding.$ref - schemas.GoogleCloudAiplatformV1beta1IndexDatapoint.properties.sparseEmbedding.description - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.description - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.id - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.dimensions.description - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.dimensions.items.format - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.dimensions.items.type - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.dimensions.type - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.values.description - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.values.items.format - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.values.items.type - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.properties.values.type - schemas.GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding.type - schemas.GoogleCloudAiplatformV1beta1IndexStats.properties.sparseVectorsCount.description - schemas.GoogleCloudAiplatformV1beta1IndexStats.properties.sparseVectorsCount.format - schemas.GoogleCloudAiplatformV1beta1IndexStats.properties.sparseVectorsCount.readOnly - schemas.GoogleCloudAiplatformV1beta1IndexStats.properties.sparseVectorsCount.type The following keys were changed: - schemas.GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataRecordError.properties.errorType.enum - 
schemas.GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataRecordError.properties.errorType.enumDescriptions #### aiplatform:v1 The following keys were deleted: - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.description - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.id - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.properties.videoEmbeddings.description - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.properties.videoEmbeddings.items.type - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.properties.videoEmbeddings.type - schemas.CloudAiLargeModelsVisionEmbedVideoResponse.type - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.description - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.id - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.properties.response.$ref - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.properties.response.description - schemas.CloudAiLargeModelsVisionMediaGenerateContentResponse.type - schemas.CloudAiLargeModelsVisionReasonVideoResponse.description - schemas.CloudAiLargeModelsVisionReasonVideoResponse.id - schemas.CloudAiLargeModelsVisionReasonVideoResponse.properties.responses.description - schemas.CloudAiLargeModelsVisionReasonVideoResponse.properties.responses.items.$ref - schemas.CloudAiLargeModelsVisionReasonVideoResponse.properties.responses.type - schemas.CloudAiLargeModelsVisionReasonVideoResponse.type - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.description - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.id - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.relativeTemporalPartition.$ref - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.relativeTemporalPartition.description - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.text.description - 
schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.properties.text.type - schemas.CloudAiLargeModelsVisionReasonVideoResponseTextResponse.type - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.description - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.id - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.endOffset.description - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.endOffset.format - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.endOffset.type - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.startOffset.description - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.startOffset.format - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.properties.startOffset.type - schemas.CloudAiLargeModelsVisionRelativeTemporalPartition.type - schemas.CloudAiNlLlmProtoServiceCandidate.id - schemas.CloudAiNlLlmProtoServiceCandidate.properties.citationMetadata.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.citationMetadata.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.content.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.content.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishMessage.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishMessage.type - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.enum - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.enumDescriptions - schemas.CloudAiNlLlmProtoServiceCandidate.properties.finishReason.type - schemas.CloudAiNlLlmProtoServiceCandidate.properties.groundingMetadata.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.groundingMetadata.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.index.description - 
schemas.CloudAiNlLlmProtoServiceCandidate.properties.index.format - schemas.CloudAiNlLlmProtoServiceCandidate.properties.index.type - schemas.CloudAiNlLlmProtoServiceCandidate.properties.safetyRatings.description - schemas.CloudAiNlLlmProtoServiceCandidate.properties.safetyRatings.items.$ref - schemas.CloudAiNlLlmProtoServiceCandidate.properties.safetyRatings.type - schemas.CloudAiNlLlmProtoServiceCandidate.type - schemas.CloudAiNlLlmProtoServiceCitation.description - schemas.CloudAiNlLlmProtoServiceCitation.id - schemas.CloudAiNlLlmProtoServiceCitation.properties.endIndex.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.endIndex.format - schemas.CloudAiNlLlmProtoServiceCitation.properties.endIndex.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.license.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.license.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.publicationDate.$ref - schemas.CloudAiNlLlmProtoServiceCitation.properties.publicationDate.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.startIndex.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.startIndex.format - schemas.CloudAiNlLlmProtoServiceCitation.properties.startIndex.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.title.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.title.type - schemas.CloudAiNlLlmProtoServiceCitation.properties.uri.description - schemas.CloudAiNlLlmProtoServiceCitation.properties.uri.type - schemas.CloudAiNlLlmProtoServiceCitation.type - schemas.CloudAiNlLlmProtoServiceCitationMetadata.description - schemas.CloudAiNlLlmProtoServiceCitationMetadata.id - schemas.CloudAiNlLlmProtoServiceCitationMetadata.properties.citations.description - schemas.CloudAiNlLlmProtoServiceCitationMetadata.properties.citations.items.$ref - schemas.CloudAiNlLlmProtoServiceCitationMetadata.properties.citations.type - schemas.CloudAiNlLlmProtoServiceCitationMetadata.type - 
schemas.CloudAiNlLlmProtoServiceContent.description - schemas.CloudAiNlLlmProtoServiceContent.id - schemas.CloudAiNlLlmProtoServiceContent.properties.isCached.description - schemas.CloudAiNlLlmProtoServiceContent.properties.isCached.type - schemas.CloudAiNlLlmProtoServiceContent.properties.parts.description - schemas.CloudAiNlLlmProtoServiceContent.properties.parts.items.$ref - schemas.CloudAiNlLlmProtoServiceContent.properties.parts.type - schemas.CloudAiNlLlmProtoServiceContent.properties.role.description - schemas.CloudAiNlLlmProtoServiceContent.properties.role.type - schemas.CloudAiNlLlmProtoServiceContent.type - schemas.CloudAiNlLlmProtoServiceFact.description - schemas.CloudAiNlLlmProtoServiceFact.id - schemas.CloudAiNlLlmProtoServiceFact.properties.query.description - schemas.CloudAiNlLlmProtoServiceFact.properties.query.type - schemas.CloudAiNlLlmProtoServiceFact.properties.summary.description - schemas.CloudAiNlLlmProtoServiceFact.properties.summary.type - schemas.CloudAiNlLlmProtoServiceFact.properties.title.description - schemas.CloudAiNlLlmProtoServiceFact.properties.title.type - schemas.CloudAiNlLlmProtoServiceFact.properties.url.description - schemas.CloudAiNlLlmProtoServiceFact.properties.url.type - schemas.CloudAiNlLlmProtoServiceFact.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.id - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.additionalProperties.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.additionalProperties.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.args.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.name.description - schemas.CloudAiNlLlmProtoServiceFunctionCall.properties.name.type - schemas.CloudAiNlLlmProtoServiceFunctionCall.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.description - 
schemas.CloudAiNlLlmProtoServiceFunctionResponse.id - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.name.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.name.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.additionalProperties.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.additionalProperties.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.description - schemas.CloudAiNlLlmProtoServiceFunctionResponse.properties.response.type - schemas.CloudAiNlLlmProtoServiceFunctionResponse.type - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.id - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.candidates.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.candidates.items.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.candidates.type - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.debugMetadata.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.debugMetadata.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.facts.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.facts.items.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.facts.type - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.promptFeedback.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.promptFeedback.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.reportingMetrics.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.reportingMetrics.description - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.usageMetadata.$ref - schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.properties.usageMetadata.description - 
schemas.CloudAiNlLlmProtoServiceGenerateMultiModalResponse.type - schemas.CloudAiNlLlmProtoServiceMessageMetadata.id - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.factualityDebugMetadata.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.factualityDebugMetadata.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.inputFilterInfo.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.inputFilterInfo.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.modelRoutingDecision.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.modelRoutingDecision.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.outputFilterInfo.description - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.outputFilterInfo.items.$ref - schemas.CloudAiNlLlmProtoServiceMessageMetadata.properties.outputFilterInfo.type - schemas.CloudAiNlLlmProtoServiceMessageMetadata.type - schemas.CloudAiNlLlmProtoServicePart.description - schemas.CloudAiNlLlmProtoServicePart.id - schemas.CloudAiNlLlmProtoServicePart.properties.documentMetadata.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.documentMetadata.description - schemas.CloudAiNlLlmProtoServicePart.properties.fileData.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.fileData.description - schemas.CloudAiNlLlmProtoServicePart.properties.functionCall.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.functionCall.description - schemas.CloudAiNlLlmProtoServicePart.properties.functionResponse.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.functionResponse.description - schemas.CloudAiNlLlmProtoServicePart.properties.inlineData.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.inlineData.description - schemas.CloudAiNlLlmProtoServicePart.properties.lmRootMetadata.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.lmRootMetadata.description - schemas.CloudAiNlLlmProtoServicePart.properties.text.description 
- schemas.CloudAiNlLlmProtoServicePart.properties.text.type - schemas.CloudAiNlLlmProtoServicePart.properties.videoMetadata.$ref - schemas.CloudAiNlLlmProtoServicePart.properties.videoMetadata.description - schemas.CloudAiNlLlmProtoServicePart.type - schemas.CloudAiNlLlmProtoServicePartBlob.description - schemas.CloudAiNlLlmProtoServicePartBlob.id - schemas.CloudAiNlLlmProtoServicePartBlob.properties.data.description - schemas.CloudAiNlLlmProtoServicePartBlob.properties.data.format - schemas.CloudAiNlLlmProtoServicePartBlob.properties.data.type - schemas.CloudAiNlLlmProtoServicePartBlob.properties.mimeType.description - schemas.CloudAiNlLlmProtoServicePartBlob.properties.mimeType.type - schemas.CloudAiNlLlmProtoServicePartBlob.properties.originalFileData.$ref - schemas.CloudAiNlLlmProtoServicePartBlob.properties.originalFileData.description - schemas.CloudAiNlLlmProtoServicePartBlob.type - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.description - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.id - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.originalDocumentBlob.$ref - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.originalDocumentBlob.description - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.pageNumber.description - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.pageNumber.format - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.properties.pageNumber.type - schemas.CloudAiNlLlmProtoServicePartDocumentMetadata.type - schemas.CloudAiNlLlmProtoServicePartFileData.description - schemas.CloudAiNlLlmProtoServicePartFileData.id - schemas.CloudAiNlLlmProtoServicePartFileData.properties.fileUri.description - schemas.CloudAiNlLlmProtoServicePartFileData.properties.fileUri.type - schemas.CloudAiNlLlmProtoServicePartFileData.properties.mimeType.description - schemas.CloudAiNlLlmProtoServicePartFileData.properties.mimeType.type - schemas.CloudAiNlLlmProtoServicePartFileData.type - 
schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.description - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.id - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.properties.chunkId.description - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.properties.chunkId.type - schemas.CloudAiNlLlmProtoServicePartLMRootMetadata.type - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.description - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.id - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.endOffset.description - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.endOffset.format - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.endOffset.type - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.startOffset.description - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.startOffset.format - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.properties.startOffset.type - schemas.CloudAiNlLlmProtoServicePartVideoMetadata.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.id - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.enum - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.enumDescriptions - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReason.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReasonMessage.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.blockReasonMessage.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.safetyRatings.description - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.safetyRatings.items.$ref - schemas.CloudAiNlLlmProtoServicePromptFeedback.properties.safetyRatings.type - schemas.CloudAiNlLlmProtoServicePromptFeedback.type - schemas.CloudAiNlLlmProtoServiceRaiResult.description - 
schemas.CloudAiNlLlmProtoServiceRaiResult.id - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.aidaRecitationResult.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.aidaRecitationResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.blocked.deprecated - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.blocked.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.blocked.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.items.format - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.items.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.errorCodes.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.filtered.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.filtered.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.languageFilterResult.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.languageFilterResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.mmRecitationResult.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.mmRecitationResult.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.raiSignals.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.raiSignals.items.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.raiSignals.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.translationRequestInfos.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.translationRequestInfos.items.$ref - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.translationRequestInfos.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredBlocklist.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredBlocklist.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredRecitation.description - 
schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredRecitation.type - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredSafetyFilter.description - schemas.CloudAiNlLlmProtoServiceRaiResult.properties.triggeredSafetyFilter.type - schemas.CloudAiNlLlmProtoServiceRaiResult.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.id - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.enum - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.enumDescriptions - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.confidence.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.flagged.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.flagged.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.influentialTerms.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.influentialTerms.items.$ref - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.influentialTerms.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.enum - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.enumDescriptions - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.raiCategory.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.score.description - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.score.format - schemas.CloudAiNlLlmProtoServiceRaiSignal.properties.score.type - schemas.CloudAiNlLlmProtoServiceRaiSignal.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.id - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.beginOffset.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.beginOffset.format - 
schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.beginOffset.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.confidence.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.confidence.format - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.confidence.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.enum - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.enumDescriptions - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.source.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.term.description - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.properties.term.type - schemas.CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.id - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.blocked.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.blocked.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.enum - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.category.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.influentialTerms.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.influentialTerms.items.$ref - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.influentialTerms.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.enum - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.enumDescriptions - 
schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probability.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probabilityScore.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probabilityScore.format - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.probabilityScore.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.enum - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severity.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severityScore.description - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severityScore.format - schemas.CloudAiNlLlmProtoServiceSafetyRating.properties.severityScore.type - schemas.CloudAiNlLlmProtoServiceSafetyRating.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.id - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.beginOffset.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.beginOffset.format - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.beginOffset.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.confidence.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.confidence.format - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.confidence.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.enum - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.enumDescriptions - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.source.type - 
schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.term.description - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.properties.term.type - schemas.CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.description - schemas.CloudAiNlLlmProtoServiceUsageMetadata.id - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.candidatesTokenCount.description - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.candidatesTokenCount.format - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.candidatesTokenCount.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.promptTokenCount.description - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.promptTokenCount.format - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.promptTokenCount.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.totalTokenCount.format - schemas.CloudAiNlLlmProtoServiceUsageMetadata.properties.totalTokenCount.type - schemas.CloudAiNlLlmProtoServiceUsageMetadata.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.id - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.argentumMetricId.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.argentumMetricId.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.doubleValue.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.doubleValue.format - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.doubleValue.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.int64Value.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.int64Value.format - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.int64Value.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.metricName.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.metricName.type - 
schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.systemLabels.description - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.systemLabels.items.$ref - schemas.IntelligenceCloudAutomlXpsMetricEntry.properties.systemLabels.type - schemas.IntelligenceCloudAutomlXpsMetricEntry.type - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.id - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelName.description - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelName.type - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelValue.description - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.properties.labelValue.type - schemas.IntelligenceCloudAutomlXpsMetricEntryLabel.type - schemas.IntelligenceCloudAutomlXpsReportingMetrics.id - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.deprecated - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.description - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.format - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.effectiveTrainingDuration.type - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.metricEntries.description - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.metricEntries.items.$ref - schemas.IntelligenceCloudAutomlXpsReportingMetrics.properties.metricEntries.type - schemas.IntelligenceCloudAutomlXpsReportingMetrics.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.id - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.amarnaId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.arxivId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.author.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bibkey.type - 
schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.biorxivId.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.biorxivId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookTitle.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookVolumeId.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookVolumeId.format - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.bookVolumeId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.category.enum - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.category.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.category.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.conversationId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.enum - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.dataset.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.filepath.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.geminiId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.gnewsArticleTitle.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.goodallExampleId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.isOptOut.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.isOptOut.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.isPrompt.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.lamdaExampleId.type - 
schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.license.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.meenaConversationId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.naturalLanguageCode.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.naturalLanguageCode.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.noAttribution.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.noAttribution.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.podcastUtteranceId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.publicationDate.$ref - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.qualityScoreExperimentOnly.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.qualityScoreExperimentOnly.format - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.qualityScoreExperimentOnly.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.repo.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.repo.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.url.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.url.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.volumeId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.wikipediaArticleTitle.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.wikipediaArticleTitle.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.youtubeVideoId.description - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.properties.youtubeVideoId.type - schemas.LanguageLabsAidaTrustRecitationProtoDocAttribution.type - 
schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.description - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.id - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.dynamicSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.dynamicSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.description - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.enum - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.recitationAction.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.trainingSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.properties.trainingSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoRecitationResult.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.id - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.enum - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.attributionDataset.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.displayAttributionMessage.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.displayAttributionMessage.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docAttribution.$ref - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docOccurrences.description - 
schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docOccurrences.format - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.docOccurrences.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.endIndex.format - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.endIndex.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.rawText.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.rawText.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.segmentRecitationAction.enum - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.segmentRecitationAction.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.segmentRecitationAction.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.enum - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.sourceCategory.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.startIndex.description - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.startIndex.format - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.properties.startIndex.type - schemas.LanguageLabsAidaTrustRecitationProtoSegmentResult.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.id - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.dynamicSegmentResults.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.dynamicSegmentResults.items.$ref - 
schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.dynamicSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.fullyCheckedTextIndex.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.fullyCheckedTextIndex.format - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.fullyCheckedTextIndex.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.enum - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.enumDescriptions - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.recitationAction.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.trainingSegmentResults.description - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.trainingSegmentResults.items.$ref - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.properties.trainingSegmentResults.type - schemas.LanguageLabsAidaTrustRecitationProtoStreamRecitationResult.type - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.description - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.id - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.properties.imageResult.$ref - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.properties.textResult.$ref - schemas.LearningGenaiRecitationContentChunkRecitationCheckResult.type - schemas.LearningGenaiRecitationDocAttribution.description - schemas.LearningGenaiRecitationDocAttribution.id - schemas.LearningGenaiRecitationDocAttribution.properties.amarnaId.type - schemas.LearningGenaiRecitationDocAttribution.properties.arxivId.type - 
schemas.LearningGenaiRecitationDocAttribution.properties.author.type - schemas.LearningGenaiRecitationDocAttribution.properties.bibkey.type - schemas.LearningGenaiRecitationDocAttribution.properties.biorxivId.description - schemas.LearningGenaiRecitationDocAttribution.properties.biorxivId.type - schemas.LearningGenaiRecitationDocAttribution.properties.bookTitle.type - schemas.LearningGenaiRecitationDocAttribution.properties.bookVolumeId.description - schemas.LearningGenaiRecitationDocAttribution.properties.bookVolumeId.format - schemas.LearningGenaiRecitationDocAttribution.properties.bookVolumeId.type - schemas.LearningGenaiRecitationDocAttribution.properties.conversationId.type - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.description - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.enum - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.enumDescriptions - schemas.LearningGenaiRecitationDocAttribution.properties.dataset.type - schemas.LearningGenaiRecitationDocAttribution.properties.filepath.type - schemas.LearningGenaiRecitationDocAttribution.properties.geminiId.type - schemas.LearningGenaiRecitationDocAttribution.properties.gnewsArticleTitle.type - schemas.LearningGenaiRecitationDocAttribution.properties.goodallExampleId.type - schemas.LearningGenaiRecitationDocAttribution.properties.isOptOut.description - schemas.LearningGenaiRecitationDocAttribution.properties.isOptOut.type - schemas.LearningGenaiRecitationDocAttribution.properties.isPrompt.description - schemas.LearningGenaiRecitationDocAttribution.properties.isPrompt.type - schemas.LearningGenaiRecitationDocAttribution.properties.lamdaExampleId.type - schemas.LearningGenaiRecitationDocAttribution.properties.license.type - schemas.LearningGenaiRecitationDocAttribution.properties.meenaConversationId.type - schemas.LearningGenaiRecitationDocAttribution.properties.naturalLanguageCode.description - 
schemas.LearningGenaiRecitationDocAttribution.properties.naturalLanguageCode.type - schemas.LearningGenaiRecitationDocAttribution.properties.noAttribution.description - schemas.LearningGenaiRecitationDocAttribution.properties.noAttribution.type - schemas.LearningGenaiRecitationDocAttribution.properties.podcastUtteranceId.type - schemas.LearningGenaiRecitationDocAttribution.properties.publicationDate.$ref - schemas.LearningGenaiRecitationDocAttribution.properties.qualityScoreExperimentOnly.description - schemas.LearningGenaiRecitationDocAttribution.properties.qualityScoreExperimentOnly.format - schemas.LearningGenaiRecitationDocAttribution.properties.qualityScoreExperimentOnly.type - schemas.LearningGenaiRecitationDocAttribution.properties.repo.description - schemas.LearningGenaiRecitationDocAttribution.properties.repo.type - schemas.LearningGenaiRecitationDocAttribution.properties.url.description - schemas.LearningGenaiRecitationDocAttribution.properties.url.type - schemas.LearningGenaiRecitationDocAttribution.properties.volumeId.type - schemas.LearningGenaiRecitationDocAttribution.properties.wikipediaArticleTitle.description - schemas.LearningGenaiRecitationDocAttribution.properties.wikipediaArticleTitle.type - schemas.LearningGenaiRecitationDocAttribution.properties.youtubeVideoId.type - schemas.LearningGenaiRecitationDocAttribution.type - schemas.LearningGenaiRecitationImageDocAttribution.description - schemas.LearningGenaiRecitationImageDocAttribution.id - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.description - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.enum - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.enumDescriptions - schemas.LearningGenaiRecitationImageDocAttribution.properties.datasetName.type - schemas.LearningGenaiRecitationImageDocAttribution.properties.stringDocids.description - schemas.LearningGenaiRecitationImageDocAttribution.properties.stringDocids.type - 
schemas.LearningGenaiRecitationImageDocAttribution.type - schemas.LearningGenaiRecitationImageRecitationCheckResult.id - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.description - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.enum - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.enumDescriptions - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitationAction.type - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitedImages.description - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitedImages.items.$ref - schemas.LearningGenaiRecitationImageRecitationCheckResult.properties.recitedImages.type - schemas.LearningGenaiRecitationImageRecitationCheckResult.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.id - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.docAttribution.$ref - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.docAttribution.description - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.description - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.enum - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.enumDescriptions - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.embeddingModel.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.imageId.description - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.imageId.format - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.imageId.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.scores.description - 
schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.scores.format - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.properties.scores.type - schemas.LearningGenaiRecitationImageRecitationCheckResultSimilarImage.type - schemas.LearningGenaiRecitationMMRecitationCheckResult.description - schemas.LearningGenaiRecitationMMRecitationCheckResult.id - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.chunkResults.items.$ref - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.chunkResults.type - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.description - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.enum - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.enumDescriptions - schemas.LearningGenaiRecitationMMRecitationCheckResult.properties.recitationAction.type - schemas.LearningGenaiRecitationMMRecitationCheckResult.type - schemas.LearningGenaiRecitationRecitationResult.description - schemas.LearningGenaiRecitationRecitationResult.id - schemas.LearningGenaiRecitationRecitationResult.properties.dynamicSegmentResults.items.$ref - schemas.LearningGenaiRecitationRecitationResult.properties.dynamicSegmentResults.type - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.description - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.enum - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.enumDescriptions - schemas.LearningGenaiRecitationRecitationResult.properties.recitationAction.type - schemas.LearningGenaiRecitationRecitationResult.properties.trainingSegmentResults.items.$ref - schemas.LearningGenaiRecitationRecitationResult.properties.trainingSegmentResults.type - schemas.LearningGenaiRecitationRecitationResult.type - schemas.LearningGenaiRecitationSegmentResult.description - 
schemas.LearningGenaiRecitationSegmentResult.id - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.description - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.enum - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.enumDescriptions - schemas.LearningGenaiRecitationSegmentResult.properties.attributionDataset.type - schemas.LearningGenaiRecitationSegmentResult.properties.displayAttributionMessage.description - schemas.LearningGenaiRecitationSegmentResult.properties.displayAttributionMessage.type - schemas.LearningGenaiRecitationSegmentResult.properties.docAttribution.$ref - schemas.LearningGenaiRecitationSegmentResult.properties.docOccurrences.description - schemas.LearningGenaiRecitationSegmentResult.properties.docOccurrences.format - schemas.LearningGenaiRecitationSegmentResult.properties.docOccurrences.type - schemas.LearningGenaiRecitationSegmentResult.properties.endIndex.format - schemas.LearningGenaiRecitationSegmentResult.properties.endIndex.type - schemas.LearningGenaiRecitationSegmentResult.properties.rawText.description - schemas.LearningGenaiRecitationSegmentResult.properties.rawText.type - schemas.LearningGenaiRecitationSegmentResult.properties.segmentRecitationAction.enum - schemas.LearningGenaiRecitationSegmentResult.properties.segmentRecitationAction.enumDescriptions - schemas.LearningGenaiRecitationSegmentResult.properties.segmentRecitationAction.type - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.description - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.enum - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.enumDescriptions - schemas.LearningGenaiRecitationSegmentResult.properties.sourceCategory.type - schemas.LearningGenaiRecitationSegmentResult.properties.startIndex.description - schemas.LearningGenaiRecitationSegmentResult.properties.startIndex.format - 
schemas.LearningGenaiRecitationSegmentResult.properties.startIndex.type - schemas.LearningGenaiRecitationSegmentResult.type - schemas.LearningGenaiRootCalculationType.description - schemas.LearningGenaiRootCalculationType.id - schemas.LearningGenaiRootCalculationType.properties.scoreType.enum - schemas.LearningGenaiRootCalculationType.properties.scoreType.enumDescriptions - schemas.LearningGenaiRootCalculationType.properties.scoreType.type - schemas.LearningGenaiRootCalculationType.properties.weights.format - schemas.LearningGenaiRootCalculationType.properties.weights.type - schemas.LearningGenaiRootCalculationType.type - schemas.LearningGenaiRootClassifierOutput.id - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutput.$ref - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutput.deprecated - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutput.description - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutputs.description - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutputs.items.$ref - schemas.LearningGenaiRootClassifierOutput.properties.ruleOutputs.type - schemas.LearningGenaiRootClassifierOutput.properties.state.$ref - schemas.LearningGenaiRootClassifierOutput.properties.state.description - schemas.LearningGenaiRootClassifierOutput.type - schemas.LearningGenaiRootClassifierOutputSummary.id - schemas.LearningGenaiRootClassifierOutputSummary.properties.metrics.items.$ref - schemas.LearningGenaiRootClassifierOutputSummary.properties.metrics.type - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutput.$ref - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutput.deprecated - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutput.description - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutputs.description - schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutputs.items.$ref - 
schemas.LearningGenaiRootClassifierOutputSummary.properties.ruleOutputs.type - schemas.LearningGenaiRootClassifierOutputSummary.type - schemas.LearningGenaiRootClassifierState.description - schemas.LearningGenaiRootClassifierState.id - schemas.LearningGenaiRootClassifierState.properties.dataProviderOutput.items.$ref - schemas.LearningGenaiRootClassifierState.properties.dataProviderOutput.type - schemas.LearningGenaiRootClassifierState.properties.metricOutput.items.$ref - schemas.LearningGenaiRootClassifierState.properties.metricOutput.type - schemas.LearningGenaiRootClassifierState.type - schemas.LearningGenaiRootCodeyChatMetadata.description - schemas.LearningGenaiRootCodeyChatMetadata.id - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.description - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.enum - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.enumDescriptions - schemas.LearningGenaiRootCodeyChatMetadata.properties.codeLanguage.type - schemas.LearningGenaiRootCodeyChatMetadata.type - schemas.LearningGenaiRootCodeyCheckpoint.description - schemas.LearningGenaiRootCodeyCheckpoint.id - schemas.LearningGenaiRootCodeyCheckpoint.properties.codeyTruncatorMetadata.$ref - schemas.LearningGenaiRootCodeyCheckpoint.properties.codeyTruncatorMetadata.description - schemas.LearningGenaiRootCodeyCheckpoint.properties.currentSample.description - schemas.LearningGenaiRootCodeyCheckpoint.properties.currentSample.type - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.description - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.enum - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.enumDeprecated - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.enumDescriptions - schemas.LearningGenaiRootCodeyCheckpoint.properties.postInferenceStep.type - schemas.LearningGenaiRootCodeyCheckpoint.type - 
schemas.LearningGenaiRootCodeyCompletionMetadata.description - schemas.LearningGenaiRootCodeyCompletionMetadata.id - schemas.LearningGenaiRootCodeyCompletionMetadata.properties.checkpoints.items.$ref - schemas.LearningGenaiRootCodeyCompletionMetadata.properties.checkpoints.type - schemas.LearningGenaiRootCodeyCompletionMetadata.type - schemas.LearningGenaiRootCodeyGenerationMetadata.description - schemas.LearningGenaiRootCodeyGenerationMetadata.id - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.output.description - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.output.type - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.description - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.enum - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.enumDeprecated - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.enumDescriptions - schemas.LearningGenaiRootCodeyGenerationMetadata.properties.postInferenceStep.type - schemas.LearningGenaiRootCodeyGenerationMetadata.type - schemas.LearningGenaiRootCodeyOutput.description - schemas.LearningGenaiRootCodeyOutput.id - schemas.LearningGenaiRootCodeyOutput.properties.codeyChatMetadata.$ref - schemas.LearningGenaiRootCodeyOutput.properties.codeyCompletionMetadata.$ref - schemas.LearningGenaiRootCodeyOutput.properties.codeyGenerationMetadata.$ref - schemas.LearningGenaiRootCodeyOutput.type - schemas.LearningGenaiRootCodeyTruncatorMetadata.description - schemas.LearningGenaiRootCodeyTruncatorMetadata.id - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.cutoffIndex.description - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.cutoffIndex.format - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.cutoffIndex.type - schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.truncatedText.description - 
schemas.LearningGenaiRootCodeyTruncatorMetadata.properties.truncatedText.type - schemas.LearningGenaiRootCodeyTruncatorMetadata.type - schemas.LearningGenaiRootControlDecodingConfigThreshold.description - schemas.LearningGenaiRootControlDecodingConfigThreshold.id - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.policy.enum - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.policy.enumDescriptions - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.policy.type - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.scoreMax.format - schemas.LearningGenaiRootControlDecodingConfigThreshold.properties.scoreMax.type - schemas.LearningGenaiRootControlDecodingConfigThreshold.type - schemas.LearningGenaiRootControlDecodingRecord.description - schemas.LearningGenaiRootControlDecodingRecord.id - schemas.LearningGenaiRootControlDecodingRecord.properties.prefixes.description - schemas.LearningGenaiRootControlDecodingRecord.properties.prefixes.type - schemas.LearningGenaiRootControlDecodingRecord.properties.scores.description - schemas.LearningGenaiRootControlDecodingRecord.properties.scores.items.$ref - schemas.LearningGenaiRootControlDecodingRecord.properties.scores.type - schemas.LearningGenaiRootControlDecodingRecord.properties.suffiexes.description - schemas.LearningGenaiRootControlDecodingRecord.properties.suffiexes.type - schemas.LearningGenaiRootControlDecodingRecord.properties.thresholds.description - schemas.LearningGenaiRootControlDecodingRecord.properties.thresholds.items.$ref - schemas.LearningGenaiRootControlDecodingRecord.properties.thresholds.type - schemas.LearningGenaiRootControlDecodingRecord.type - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.id - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.policy.enum - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.policy.enumDescriptions - 
schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.policy.type - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.score.format - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.properties.score.type - schemas.LearningGenaiRootControlDecodingRecordPolicyScore.type - schemas.LearningGenaiRootControlDecodingRecords.id - schemas.LearningGenaiRootControlDecodingRecords.properties.records.description - schemas.LearningGenaiRootControlDecodingRecords.properties.records.items.$ref - schemas.LearningGenaiRootControlDecodingRecords.properties.records.type - schemas.LearningGenaiRootControlDecodingRecords.type - schemas.LearningGenaiRootDataProviderOutput.id - schemas.LearningGenaiRootDataProviderOutput.properties.name.type - schemas.LearningGenaiRootDataProviderOutput.properties.status.$ref - schemas.LearningGenaiRootDataProviderOutput.properties.status.description - schemas.LearningGenaiRootDataProviderOutput.type - schemas.LearningGenaiRootFilterMetadata.id - schemas.LearningGenaiRootFilterMetadata.properties.confidence.description - schemas.LearningGenaiRootFilterMetadata.properties.confidence.enum - schemas.LearningGenaiRootFilterMetadata.properties.confidence.enumDescriptions - schemas.LearningGenaiRootFilterMetadata.properties.confidence.type - schemas.LearningGenaiRootFilterMetadata.properties.debugInfo.$ref - schemas.LearningGenaiRootFilterMetadata.properties.debugInfo.description - schemas.LearningGenaiRootFilterMetadata.properties.fallback.description - schemas.LearningGenaiRootFilterMetadata.properties.fallback.type - schemas.LearningGenaiRootFilterMetadata.properties.info.description - schemas.LearningGenaiRootFilterMetadata.properties.info.type - schemas.LearningGenaiRootFilterMetadata.properties.name.description - schemas.LearningGenaiRootFilterMetadata.properties.name.type - schemas.LearningGenaiRootFilterMetadata.properties.reason.description - schemas.LearningGenaiRootFilterMetadata.properties.reason.enum - 
schemas.LearningGenaiRootFilterMetadata.properties.reason.enumDescriptions - schemas.LearningGenaiRootFilterMetadata.properties.reason.type - schemas.LearningGenaiRootFilterMetadata.properties.text.description - schemas.LearningGenaiRootFilterMetadata.properties.text.type - schemas.LearningGenaiRootFilterMetadata.type - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.id - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.classifierOutput.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.defaultMetadata.type - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.languageFilterResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiOutput.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiOutput.description - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiSignal.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.raiSignal.deprecated - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.records.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.records.description - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.streamRecitationResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.streamRecitationResult.deprecated - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.takedownResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.properties.toxicityResult.$ref - schemas.LearningGenaiRootFilterMetadataFilterDebugInfo.type - schemas.LearningGenaiRootGroundingMetadata.id - schemas.LearningGenaiRootGroundingMetadata.properties.citations.items.$ref - schemas.LearningGenaiRootGroundingMetadata.properties.citations.type - schemas.LearningGenaiRootGroundingMetadata.properties.groundingCancelled.description - 
schemas.LearningGenaiRootGroundingMetadata.properties.groundingCancelled.type - schemas.LearningGenaiRootGroundingMetadata.properties.searchQueries.items.type - schemas.LearningGenaiRootGroundingMetadata.properties.searchQueries.type - schemas.LearningGenaiRootGroundingMetadata.type - schemas.LearningGenaiRootGroundingMetadataCitation.id - schemas.LearningGenaiRootGroundingMetadataCitation.properties.endIndex.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.endIndex.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.endIndex.type - schemas.LearningGenaiRootGroundingMetadataCitation.properties.factIndex.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.factIndex.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.factIndex.type - schemas.LearningGenaiRootGroundingMetadataCitation.properties.score.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.score.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.score.type - schemas.LearningGenaiRootGroundingMetadataCitation.properties.startIndex.description - schemas.LearningGenaiRootGroundingMetadataCitation.properties.startIndex.format - schemas.LearningGenaiRootGroundingMetadataCitation.properties.startIndex.type - schemas.LearningGenaiRootGroundingMetadataCitation.type - schemas.LearningGenaiRootHarm.id - schemas.LearningGenaiRootHarm.properties.contextualDangerous.description - schemas.LearningGenaiRootHarm.properties.contextualDangerous.type - schemas.LearningGenaiRootHarm.properties.csam.type - schemas.LearningGenaiRootHarm.properties.fringe.type - schemas.LearningGenaiRootHarm.properties.grailImageHarmType.$ref - schemas.LearningGenaiRootHarm.properties.grailTextHarmType.$ref - schemas.LearningGenaiRootHarm.properties.imageChild.type - schemas.LearningGenaiRootHarm.properties.imageCsam.type - schemas.LearningGenaiRootHarm.properties.imagePedo.type - 
schemas.LearningGenaiRootHarm.properties.imagePorn.description - schemas.LearningGenaiRootHarm.properties.imagePorn.type - schemas.LearningGenaiRootHarm.properties.imageViolence.type - schemas.LearningGenaiRootHarm.properties.pqc.type - schemas.LearningGenaiRootHarm.properties.safetycat.$ref - schemas.LearningGenaiRootHarm.properties.spii.$ref - schemas.LearningGenaiRootHarm.properties.spii.description - schemas.LearningGenaiRootHarm.properties.threshold.format - schemas.LearningGenaiRootHarm.properties.threshold.type - schemas.LearningGenaiRootHarm.properties.videoFrameChild.type - schemas.LearningGenaiRootHarm.properties.videoFrameCsam.type - schemas.LearningGenaiRootHarm.properties.videoFramePedo.type - schemas.LearningGenaiRootHarm.properties.videoFramePorn.description - schemas.LearningGenaiRootHarm.properties.videoFramePorn.type - schemas.LearningGenaiRootHarm.properties.videoFrameViolence.type - schemas.LearningGenaiRootHarm.type - schemas.LearningGenaiRootHarmGrailImageHarmType.description - schemas.LearningGenaiRootHarmGrailImageHarmType.id - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.items.enum - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.items.enumDescriptions - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.items.type - schemas.LearningGenaiRootHarmGrailImageHarmType.properties.imageHarmType.type - schemas.LearningGenaiRootHarmGrailImageHarmType.type - schemas.LearningGenaiRootHarmGrailTextHarmType.description - schemas.LearningGenaiRootHarmGrailTextHarmType.id - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.items.enum - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.items.enumDescriptions - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.items.type - schemas.LearningGenaiRootHarmGrailTextHarmType.properties.harmType.type - schemas.LearningGenaiRootHarmGrailTextHarmType.type - 
schemas.LearningGenaiRootHarmSafetyCatCategories.description - schemas.LearningGenaiRootHarmSafetyCatCategories.id - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.items.enum - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.items.enumDescriptions - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.items.type - schemas.LearningGenaiRootHarmSafetyCatCategories.properties.categories.type - schemas.LearningGenaiRootHarmSafetyCatCategories.type - schemas.LearningGenaiRootHarmSpiiFilter.description - schemas.LearningGenaiRootHarmSpiiFilter.id - schemas.LearningGenaiRootHarmSpiiFilter.properties.usBankRoutingMicr.type - schemas.LearningGenaiRootHarmSpiiFilter.properties.usEmployerIdentificationNumber.type - schemas.LearningGenaiRootHarmSpiiFilter.properties.usSocialSecurityNumber.type - schemas.LearningGenaiRootHarmSpiiFilter.type - schemas.LearningGenaiRootInternalMetadata.id - schemas.LearningGenaiRootInternalMetadata.properties.scoredTokens.items.$ref - schemas.LearningGenaiRootInternalMetadata.properties.scoredTokens.type - schemas.LearningGenaiRootInternalMetadata.type - schemas.LearningGenaiRootLanguageFilterResult.id - schemas.LearningGenaiRootLanguageFilterResult.properties.allowed.description - schemas.LearningGenaiRootLanguageFilterResult.properties.allowed.type - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguage.description - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguage.type - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguageProbability.description - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguageProbability.format - schemas.LearningGenaiRootLanguageFilterResult.properties.detectedLanguageProbability.type - schemas.LearningGenaiRootLanguageFilterResult.type - schemas.LearningGenaiRootMetricOutput.id - schemas.LearningGenaiRootMetricOutput.properties.debug.type - 
schemas.LearningGenaiRootMetricOutput.properties.name.description - schemas.LearningGenaiRootMetricOutput.properties.name.type - schemas.LearningGenaiRootMetricOutput.properties.numericValue.format - schemas.LearningGenaiRootMetricOutput.properties.numericValue.type - schemas.LearningGenaiRootMetricOutput.properties.status.$ref - schemas.LearningGenaiRootMetricOutput.properties.stringValue.type - schemas.LearningGenaiRootMetricOutput.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.id - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.additionalProperties.format - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.additionalProperties.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.description - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.factRetrievalMillisecondsByProvider.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.prompt2queryMilliseconds.description - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.prompt2queryMilliseconds.format - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.prompt2queryMilliseconds.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.retrievalAugmentMilliseconds.description - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.retrievalAugmentMilliseconds.format - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.properties.retrievalAugmentMilliseconds.type - schemas.LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata.type - 
schemas.LearningGenaiRootRAIOutput.description - schemas.LearningGenaiRootRAIOutput.id - schemas.LearningGenaiRootRAIOutput.properties.allowed.type - schemas.LearningGenaiRootRAIOutput.properties.harm.$ref - schemas.LearningGenaiRootRAIOutput.properties.name.type - schemas.LearningGenaiRootRAIOutput.properties.score.format - schemas.LearningGenaiRootRAIOutput.properties.score.type - schemas.LearningGenaiRootRAIOutput.type - schemas.LearningGenaiRootRegexTakedownResult.id - schemas.LearningGenaiRootRegexTakedownResult.properties.allowed.description - schemas.LearningGenaiRootRegexTakedownResult.properties.allowed.type - schemas.LearningGenaiRootRegexTakedownResult.properties.takedownRegex.description - schemas.LearningGenaiRootRegexTakedownResult.properties.takedownRegex.type - schemas.LearningGenaiRootRegexTakedownResult.type - schemas.LearningGenaiRootRequestMetrics.id - schemas.LearningGenaiRootRequestMetrics.properties.audioMetrics.$ref - schemas.LearningGenaiRootRequestMetrics.properties.audioMetrics.description - schemas.LearningGenaiRootRequestMetrics.properties.imageMetrics.$ref - schemas.LearningGenaiRootRequestMetrics.properties.imageMetrics.description - schemas.LearningGenaiRootRequestMetrics.properties.textTokenCount.description - schemas.LearningGenaiRootRequestMetrics.properties.textTokenCount.format - schemas.LearningGenaiRootRequestMetrics.properties.textTokenCount.type - schemas.LearningGenaiRootRequestMetrics.properties.totalTokenCount.description - schemas.LearningGenaiRootRequestMetrics.properties.totalTokenCount.format - schemas.LearningGenaiRootRequestMetrics.properties.totalTokenCount.type - schemas.LearningGenaiRootRequestMetrics.properties.videoMetrics.$ref - schemas.LearningGenaiRootRequestMetrics.properties.videoMetrics.description - schemas.LearningGenaiRootRequestMetrics.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.id - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioDuration.description - 
schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioDuration.format - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioDuration.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioTokenCount.description - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioTokenCount.format - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.audioTokenCount.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.numAudioFrames.description - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.numAudioFrames.format - schemas.LearningGenaiRootRequestMetricsAudioMetrics.properties.numAudioFrames.type - schemas.LearningGenaiRootRequestMetricsAudioMetrics.type - schemas.LearningGenaiRootRequestMetricsImageMetrics.id - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.imageTokenCount.description - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.imageTokenCount.format - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.imageTokenCount.type - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.numImages.description - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.numImages.format - schemas.LearningGenaiRootRequestMetricsImageMetrics.properties.numImages.type - schemas.LearningGenaiRootRequestMetricsImageMetrics.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.id - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.audioSample.$ref - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.audioSample.description - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.numVideoFrames.description - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.numVideoFrames.format - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.numVideoFrames.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoDuration.description - 
schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoDuration.format - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoDuration.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoFramesTokenCount.description - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoFramesTokenCount.format - schemas.LearningGenaiRootRequestMetricsVideoMetrics.properties.videoFramesTokenCount.type - schemas.LearningGenaiRootRequestMetricsVideoMetrics.type - schemas.LearningGenaiRootRequestResponseTakedownResult.id - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.allowed.description - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.allowed.type - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.requestTakedownRegex.description - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.requestTakedownRegex.type - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.responseTakedownRegex.description - schemas.LearningGenaiRootRequestResponseTakedownResult.properties.responseTakedownRegex.type - schemas.LearningGenaiRootRequestResponseTakedownResult.type - schemas.LearningGenaiRootRoutingDecision.description - schemas.LearningGenaiRootRoutingDecision.id - schemas.LearningGenaiRootRoutingDecision.properties.metadata.$ref - schemas.LearningGenaiRootRoutingDecision.properties.modelConfigId.description - schemas.LearningGenaiRootRoutingDecision.properties.modelConfigId.type - schemas.LearningGenaiRootRoutingDecision.type - schemas.LearningGenaiRootRoutingDecisionMetadata.description - schemas.LearningGenaiRootRoutingDecisionMetadata.id - schemas.LearningGenaiRootRoutingDecisionMetadata.properties.scoreBasedRoutingMetadata.$ref - schemas.LearningGenaiRootRoutingDecisionMetadata.properties.tokenLengthBasedRoutingMetadata.$ref - schemas.LearningGenaiRootRoutingDecisionMetadata.type - 
schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.id - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.matchedRule.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.matchedRule.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.score.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.score.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.usedDefaultFallback.description - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.properties.usedDefaultFallback.type - schemas.LearningGenaiRootRoutingDecisionMetadataScoreBased.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.id - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelInputTokenMetadata.items.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelInputTokenMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelMaxTokenMetadata.items.$ref - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.properties.modelMaxTokenMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBased.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.id - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.computedInputTokenLength.description - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.computedInputTokenLength.format - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.computedInputTokenLength.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.modelId.type - 
schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.pickedAsFallback.description - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.pickedAsFallback.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.selected.description - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.properties.selected.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.id - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumInputTokens.format - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumInputTokens.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumOutputTokens.format - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.maxNumOutputTokens.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.properties.modelId.type - schemas.LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata.type - schemas.LearningGenaiRootRuleOutput.id - schemas.LearningGenaiRootRuleOutput.properties.decision.enum - schemas.LearningGenaiRootRuleOutput.properties.decision.enumDescriptions - schemas.LearningGenaiRootRuleOutput.properties.decision.type - schemas.LearningGenaiRootRuleOutput.properties.name.type - schemas.LearningGenaiRootRuleOutput.type - schemas.LearningGenaiRootScore.id - schemas.LearningGenaiRootScore.properties.calculationType.$ref - schemas.LearningGenaiRootScore.properties.internalMetadata.$ref - schemas.LearningGenaiRootScore.properties.internalMetadata.description - 
schemas.LearningGenaiRootScore.properties.thresholdType.$ref - schemas.LearningGenaiRootScore.properties.tokensAndLogprobPerDecodingStep.$ref - schemas.LearningGenaiRootScore.properties.tokensAndLogprobPerDecodingStep.description - schemas.LearningGenaiRootScore.properties.value.format - schemas.LearningGenaiRootScore.properties.value.type - schemas.LearningGenaiRootScore.type - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.id - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.equalOrGreaterThan.$ref - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.equalOrGreaterThan.description - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.lessThan.$ref - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.modelConfigId.description - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.properties.modelConfigId.type - schemas.LearningGenaiRootScoreBasedRoutingConfigRule.type - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.description - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.id - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.properties.phrase.$ref - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.properties.similarityScore.format - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.properties.similarityScore.type - schemas.LearningGenaiRootScoredSimilarityTakedownPhrase.type - schemas.LearningGenaiRootScoredToken.description - schemas.LearningGenaiRootScoredToken.id - schemas.LearningGenaiRootScoredToken.properties.endTokenScore.description - schemas.LearningGenaiRootScoredToken.properties.endTokenScore.format - schemas.LearningGenaiRootScoredToken.properties.endTokenScore.type - schemas.LearningGenaiRootScoredToken.properties.score.description - schemas.LearningGenaiRootScoredToken.properties.score.format - schemas.LearningGenaiRootScoredToken.properties.score.type - schemas.LearningGenaiRootScoredToken.properties.token.type - 
schemas.LearningGenaiRootScoredToken.type - schemas.LearningGenaiRootSimilarityTakedownPhrase.description - schemas.LearningGenaiRootSimilarityTakedownPhrase.id - schemas.LearningGenaiRootSimilarityTakedownPhrase.properties.blockedPhrase.type - schemas.LearningGenaiRootSimilarityTakedownPhrase.type - schemas.LearningGenaiRootSimilarityTakedownResult.id - schemas.LearningGenaiRootSimilarityTakedownResult.properties.allowed.description - schemas.LearningGenaiRootSimilarityTakedownResult.properties.allowed.type - schemas.LearningGenaiRootSimilarityTakedownResult.properties.scoredPhrases.description - schemas.LearningGenaiRootSimilarityTakedownResult.properties.scoredPhrases.items.$ref - schemas.LearningGenaiRootSimilarityTakedownResult.properties.scoredPhrases.type - schemas.LearningGenaiRootSimilarityTakedownResult.type - schemas.LearningGenaiRootTakedownResult.id - schemas.LearningGenaiRootTakedownResult.properties.allowed.description - schemas.LearningGenaiRootTakedownResult.properties.allowed.type - schemas.LearningGenaiRootTakedownResult.properties.regexTakedownResult.$ref - schemas.LearningGenaiRootTakedownResult.properties.requestResponseTakedownResult.$ref - schemas.LearningGenaiRootTakedownResult.properties.similarityTakedownResult.$ref - schemas.LearningGenaiRootTakedownResult.type - schemas.LearningGenaiRootThresholdType.description - schemas.LearningGenaiRootThresholdType.id - schemas.LearningGenaiRootThresholdType.properties.scoreType.enum - schemas.LearningGenaiRootThresholdType.properties.scoreType.enumDescriptions - schemas.LearningGenaiRootThresholdType.properties.scoreType.type - schemas.LearningGenaiRootThresholdType.properties.threshold.format - schemas.LearningGenaiRootThresholdType.properties.threshold.type - schemas.LearningGenaiRootThresholdType.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.id - 
schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.chosenCandidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.chosenCandidates.items.$ref - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.chosenCandidates.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.topCandidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.topCandidates.items.$ref - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.properties.topCandidates.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStep.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.id - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.logProbability.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.logProbability.format - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.logProbability.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.token.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.properties.token.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.id - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.properties.candidates.description - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.properties.candidates.items.$ref - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.properties.candidates.type - schemas.LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates.type - schemas.LearningGenaiRootToxicityResult.description - schemas.LearningGenaiRootToxicityResult.id - 
schemas.LearningGenaiRootToxicityResult.properties.signals.items.$ref - schemas.LearningGenaiRootToxicityResult.properties.signals.type - schemas.LearningGenaiRootToxicityResult.type - schemas.LearningGenaiRootToxicitySignal.description - schemas.LearningGenaiRootToxicitySignal.id - schemas.LearningGenaiRootToxicitySignal.properties.allowed.type - schemas.LearningGenaiRootToxicitySignal.properties.label.enum - schemas.LearningGenaiRootToxicitySignal.properties.label.enumDescriptions - schemas.LearningGenaiRootToxicitySignal.properties.label.type - schemas.LearningGenaiRootToxicitySignal.properties.score.format - schemas.LearningGenaiRootToxicitySignal.properties.score.type - schemas.LearningGenaiRootToxicitySignal.type - schemas.LearningGenaiRootTranslationRequestInfo.description - schemas.LearningGenaiRootTranslationRequestInfo.id - schemas.LearningGenaiRootTranslationRequestInfo.properties.detectedLanguageCodes.description - schemas.LearningGenaiRootTranslationRequestInfo.properties.detectedLanguageCodes.items.type - schemas.LearningGenaiRootTranslationRequestInfo.properties.detectedLanguageCodes.type - schemas.LearningGenaiRootTranslationRequestInfo.properties.totalContentSize.description - schemas.LearningGenaiRootTranslationRequestInfo.properties.totalContentSize.format - schemas.LearningGenaiRootTranslationRequestInfo.properties.totalContentSize.type - schemas.LearningGenaiRootTranslationRequestInfo.type - schemas.LearningServingLlmAtlasOutputMetadata.id - schemas.LearningServingLlmAtlasOutputMetadata.properties.requestTopic.type - schemas.LearningServingLlmAtlasOutputMetadata.properties.source.enum - schemas.LearningServingLlmAtlasOutputMetadata.properties.source.enumDescriptions - schemas.LearningServingLlmAtlasOutputMetadata.properties.source.type - schemas.LearningServingLlmAtlasOutputMetadata.type - schemas.LearningServingLlmMessageMetadata.description - schemas.LearningServingLlmMessageMetadata.id - 
schemas.LearningServingLlmMessageMetadata.properties.atlasMetadata.$ref - schemas.LearningServingLlmMessageMetadata.properties.classifierSummary.$ref - schemas.LearningServingLlmMessageMetadata.properties.classifierSummary.description - schemas.LearningServingLlmMessageMetadata.properties.codeyOutput.$ref - schemas.LearningServingLlmMessageMetadata.properties.codeyOutput.description - schemas.LearningServingLlmMessageMetadata.properties.currentStreamTextLength.format - schemas.LearningServingLlmMessageMetadata.properties.currentStreamTextLength.type - schemas.LearningServingLlmMessageMetadata.properties.deleted.description - schemas.LearningServingLlmMessageMetadata.properties.deleted.type - schemas.LearningServingLlmMessageMetadata.properties.filterMeta.description - schemas.LearningServingLlmMessageMetadata.properties.filterMeta.items.$ref - schemas.LearningServingLlmMessageMetadata.properties.filterMeta.type - schemas.LearningServingLlmMessageMetadata.properties.finalMessageScore.$ref - schemas.LearningServingLlmMessageMetadata.properties.finalMessageScore.description - schemas.LearningServingLlmMessageMetadata.properties.finishReason.description - schemas.LearningServingLlmMessageMetadata.properties.finishReason.enum - schemas.LearningServingLlmMessageMetadata.properties.finishReason.enumDescriptions - schemas.LearningServingLlmMessageMetadata.properties.finishReason.type - schemas.LearningServingLlmMessageMetadata.properties.groundingMetadata.$ref - schemas.LearningServingLlmMessageMetadata.properties.isCode.description - schemas.LearningServingLlmMessageMetadata.properties.isCode.type - schemas.LearningServingLlmMessageMetadata.properties.isFallback.description - schemas.LearningServingLlmMessageMetadata.properties.isFallback.type - schemas.LearningServingLlmMessageMetadata.properties.langidResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.langidResult.description - schemas.LearningServingLlmMessageMetadata.properties.language.description - 
schemas.LearningServingLlmMessageMetadata.properties.language.type - schemas.LearningServingLlmMessageMetadata.properties.lmPrefix.description - schemas.LearningServingLlmMessageMetadata.properties.lmPrefix.type - schemas.LearningServingLlmMessageMetadata.properties.lmrootInternalRequestMetrics.$ref - schemas.LearningServingLlmMessageMetadata.properties.lmrootInternalRequestMetrics.description - schemas.LearningServingLlmMessageMetadata.properties.mmRecitationResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.mmRecitationResult.description - schemas.LearningServingLlmMessageMetadata.properties.numRewinds.description - schemas.LearningServingLlmMessageMetadata.properties.numRewinds.format - schemas.LearningServingLlmMessageMetadata.properties.numRewinds.type - schemas.LearningServingLlmMessageMetadata.properties.originalText.description - schemas.LearningServingLlmMessageMetadata.properties.originalText.type - schemas.LearningServingLlmMessageMetadata.properties.perStreamDecodedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.perStreamDecodedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.perStreamDecodedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.perStreamReturnedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.perStreamReturnedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.perStreamReturnedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.raiOutputs.description - schemas.LearningServingLlmMessageMetadata.properties.raiOutputs.items.$ref - schemas.LearningServingLlmMessageMetadata.properties.raiOutputs.type - schemas.LearningServingLlmMessageMetadata.properties.recitationResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.recitationResult.description - schemas.LearningServingLlmMessageMetadata.properties.scores.description - 
schemas.LearningServingLlmMessageMetadata.properties.scores.items.$ref - schemas.LearningServingLlmMessageMetadata.properties.scores.type - schemas.LearningServingLlmMessageMetadata.properties.streamTerminated.description - schemas.LearningServingLlmMessageMetadata.properties.streamTerminated.type - schemas.LearningServingLlmMessageMetadata.properties.totalDecodedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.totalDecodedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.totalDecodedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.totalReturnedTokenCount.description - schemas.LearningServingLlmMessageMetadata.properties.totalReturnedTokenCount.format - schemas.LearningServingLlmMessageMetadata.properties.totalReturnedTokenCount.type - schemas.LearningServingLlmMessageMetadata.properties.translatedUserPrompts.description - schemas.LearningServingLlmMessageMetadata.properties.translatedUserPrompts.items.type - schemas.LearningServingLlmMessageMetadata.properties.translatedUserPrompts.type - schemas.LearningServingLlmMessageMetadata.properties.vertexRaiResult.$ref - schemas.LearningServingLlmMessageMetadata.properties.vertexRaiResult.description - schemas.LearningServingLlmMessageMetadata.type - schemas.NlpSaftLangIdLocalesResult.id - schemas.NlpSaftLangIdLocalesResult.properties.predictions.description - schemas.NlpSaftLangIdLocalesResult.properties.predictions.items.$ref - schemas.NlpSaftLangIdLocalesResult.properties.predictions.type - schemas.NlpSaftLangIdLocalesResult.type - schemas.NlpSaftLangIdLocalesResultLocale.id - schemas.NlpSaftLangIdLocalesResultLocale.properties.languageCode.description - schemas.NlpSaftLangIdLocalesResultLocale.properties.languageCode.type - schemas.NlpSaftLangIdLocalesResultLocale.type - schemas.NlpSaftLangIdResult.id - schemas.NlpSaftLangIdResult.properties.modelVersion.description - schemas.NlpSaftLangIdResult.properties.modelVersion.enum - 
schemas.NlpSaftLangIdResult.properties.modelVersion.enumDescriptions - schemas.NlpSaftLangIdResult.properties.modelVersion.type - schemas.NlpSaftLangIdResult.properties.predictions.description - schemas.NlpSaftLangIdResult.properties.predictions.items.$ref - schemas.NlpSaftLangIdResult.properties.predictions.type - schemas.NlpSaftLangIdResult.properties.spanPredictions.description - schemas.NlpSaftLangIdResult.properties.spanPredictions.items.$ref - schemas.NlpSaftLangIdResult.properties.spanPredictions.type - schemas.NlpSaftLangIdResult.type - schemas.NlpSaftLanguageSpan.id - schemas.NlpSaftLanguageSpan.properties.end.format - schemas.NlpSaftLanguageSpan.properties.end.type - schemas.NlpSaftLanguageSpan.properties.languageCode.description - schemas.NlpSaftLanguageSpan.properties.languageCode.type - schemas.NlpSaftLanguageSpan.properties.locales.$ref - schemas.NlpSaftLanguageSpan.properties.locales.description - schemas.NlpSaftLanguageSpan.properties.probability.description - schemas.NlpSaftLanguageSpan.properties.probability.format - schemas.NlpSaftLanguageSpan.properties.probability.type - schemas.NlpSaftLanguageSpan.properties.start.description - schemas.NlpSaftLanguageSpan.properties.start.format - schemas.NlpSaftLanguageSpan.properties.start.type - schemas.NlpSaftLanguageSpan.type - schemas.NlpSaftLanguageSpanSequence.id - schemas.NlpSaftLanguageSpanSequence.properties.languageSpans.description - schemas.NlpSaftLanguageSpanSequence.properties.languageSpans.items.$ref - schemas.NlpSaftLanguageSpanSequence.properties.languageSpans.type - schemas.NlpSaftLanguageSpanSequence.properties.probability.description - schemas.NlpSaftLanguageSpanSequence.properties.probability.format - schemas.NlpSaftLanguageSpanSequence.properties.probability.type - schemas.NlpSaftLanguageSpanSequence.type - schemas.Proto2BridgeMessageSet.description - schemas.Proto2BridgeMessageSet.id - schemas.Proto2BridgeMessageSet.type - schemas.UtilStatusProto.description - 
schemas.UtilStatusProto.id - schemas.UtilStatusProto.properties.canonicalCode.description - schemas.UtilStatusProto.properties.canonicalCode.format - schemas.UtilStatusProto.properties.canonicalCode.type - schemas.UtilStatusProto.properties.code.description - schemas.UtilStatusProto.properties.code.format - schemas.UtilStatusProto.properties.code.type - schemas.UtilStatusProto.properties.message.description - schemas.UtilStatusProto.properties.message.type - schemas.UtilStatusProto.properties.messageSet.$ref - schemas.UtilStatusProto.properties.messageSet.description - schemas.UtilStatusProto.properties.space.description - schemas.UtilStatusProto.properties.space.type - schemas.UtilStatusProto.type The following keys were added: - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQuery.properties.rrf.$ref - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQuery.properties.rrf.description - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF.description - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF.id - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF.properties.alpha.description - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF.properties.alpha.format - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF.properties.alpha.type - schemas.GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF.type - schemas.GoogleCloudAiplatformV1FindNeighborsResponseNeighbor.properties.sparseDistance.description - schemas.GoogleCloudAiplatformV1FindNeighborsResponseNeighbor.properties.sparseDistance.format - schemas.GoogleCloudAiplatformV1FindNeighborsResponseNeighbor.properties.sparseDistance.type - schemas.GoogleCloudAiplatformV1IndexDatapoint.properties.sparseEmbedding.$ref - schemas.GoogleCloudAiplatformV1IndexDatapoint.properties.sparseEmbedding.description - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.description - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.id - 
schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.dimensions.description - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.dimensions.items.format - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.dimensions.items.type - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.dimensions.type - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.values.description - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.values.items.format - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.values.items.type - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.properties.values.type - schemas.GoogleCloudAiplatformV1IndexDatapointSparseEmbedding.type - schemas.GoogleCloudAiplatformV1IndexStats.properties.sparseVectorsCount.description - schemas.GoogleCloudAiplatformV1IndexStats.properties.sparseVectorsCount.format - schemas.GoogleCloudAiplatformV1IndexStats.properties.sparseVectorsCount.readOnly - schemas.GoogleCloudAiplatformV1IndexStats.properties.sparseVectorsCount.type The following keys were changed: - schemas.GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError.properties.errorType.enum - schemas.GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError.properties.errorType.enumDescriptions --- discovery/aiplatform-v1.json | 9289 +++---------- discovery/aiplatform-v1beta1.json | 8441 ++---------- src/apis/aiplatform/v1.ts | 8153 +++++------ src/apis/aiplatform/v1beta1.ts | 20477 +++++++++++++--------------- 4 files changed, 15316 insertions(+), 31044 deletions(-) diff --git a/discovery/aiplatform-v1.json b/discovery/aiplatform-v1.json index 4a6ce8d263..a09953736b 100644 --- a/discovery/aiplatform-v1.json +++ b/discovery/aiplatform-v1.json @@ -21,8 +21,8 @@ "endpoints": [ { "location": "africa-south1", - "endpointUrl": "https://africa-south1-aiplatform.googleapis.com/", - 
"description": "Locational Endpoint" + "description": "Locational Endpoint", + "endpointUrl": "https://africa-south1-aiplatform.googleapis.com/" }, { "endpointUrl": "https://asia-east1-aiplatform.googleapis.com/", @@ -31,18 +31,18 @@ }, { "endpointUrl": "https://asia-east2-aiplatform.googleapis.com/", - "description": "Locational Endpoint", - "location": "asia-east2" + "location": "asia-east2", + "description": "Locational Endpoint" }, { - "endpointUrl": "https://asia-northeast1-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "location": "asia-northeast1" + "location": "asia-northeast1", + "endpointUrl": "https://asia-northeast1-aiplatform.googleapis.com/" }, { - "description": "Locational Endpoint", "location": "asia-northeast2", - "endpointUrl": "https://asia-northeast2-aiplatform.googleapis.com/" + "endpointUrl": "https://asia-northeast2-aiplatform.googleapis.com/", + "description": "Locational Endpoint" }, { "endpointUrl": "https://asia-northeast3-aiplatform.googleapis.com/", @@ -50,49 +50,49 @@ "location": "asia-northeast3" }, { - "endpointUrl": "https://asia-south1-aiplatform.googleapis.com/", + "location": "asia-south1", "description": "Locational Endpoint", - "location": "asia-south1" + "endpointUrl": "https://asia-south1-aiplatform.googleapis.com/" }, { "description": "Locational Endpoint", - "location": "asia-southeast1", - "endpointUrl": "https://asia-southeast1-aiplatform.googleapis.com/" + "endpointUrl": "https://asia-southeast1-aiplatform.googleapis.com/", + "location": "asia-southeast1" }, { "endpointUrl": "https://asia-southeast2-aiplatform.googleapis.com/", - "description": "Locational Endpoint", - "location": "asia-southeast2" + "location": "asia-southeast2", + "description": "Locational Endpoint" }, { - "location": "australia-southeast1", "endpointUrl": "https://australia-southeast1-aiplatform.googleapis.com/", + "location": "australia-southeast1", "description": "Locational Endpoint" }, { - "description": "Locational 
Endpoint", "endpointUrl": "https://australia-southeast2-aiplatform.googleapis.com/", - "location": "australia-southeast2" + "location": "australia-southeast2", + "description": "Locational Endpoint" }, { - "endpointUrl": "https://europe-central2-aiplatform.googleapis.com/", "location": "europe-central2", + "endpointUrl": "https://europe-central2-aiplatform.googleapis.com/", "description": "Locational Endpoint" }, { - "description": "Locational Endpoint", + "endpointUrl": "https://europe-north1-aiplatform.googleapis.com/", "location": "europe-north1", - "endpointUrl": "https://europe-north1-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { + "endpointUrl": "https://europe-southwest1-aiplatform.googleapis.com/", "location": "europe-southwest1", - "description": "Locational Endpoint", - "endpointUrl": "https://europe-southwest1-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { "location": "europe-west1", - "endpointUrl": "https://europe-west1-aiplatform.googleapis.com/", - "description": "Locational Endpoint" + "description": "Locational Endpoint", + "endpointUrl": "https://europe-west1-aiplatform.googleapis.com/" }, { "endpointUrl": "https://europe-west2-aiplatform.googleapis.com/", @@ -101,43 +101,43 @@ }, { "description": "Locational Endpoint", - "endpointUrl": "https://europe-west3-aiplatform.googleapis.com/", - "location": "europe-west3" + "location": "europe-west3", + "endpointUrl": "https://europe-west3-aiplatform.googleapis.com/" }, { - "location": "europe-west4", + "description": "Locational Endpoint", "endpointUrl": "https://europe-west4-aiplatform.googleapis.com/", - "description": "Locational Endpoint" + "location": "europe-west4" }, { - "description": "Locational Endpoint", + "endpointUrl": "https://europe-west6-aiplatform.googleapis.com/", "location": "europe-west6", - "endpointUrl": "https://europe-west6-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { "description": "Locational 
Endpoint", - "location": "europe-west8", - "endpointUrl": "https://europe-west8-aiplatform.googleapis.com/" + "endpointUrl": "https://europe-west8-aiplatform.googleapis.com/", + "location": "europe-west8" }, { - "location": "europe-west9", "description": "Locational Endpoint", - "endpointUrl": "https://europe-west9-aiplatform.googleapis.com/" + "endpointUrl": "https://europe-west9-aiplatform.googleapis.com/", + "location": "europe-west9" }, { + "endpointUrl": "https://europe-west12-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "location": "europe-west12", - "endpointUrl": "https://europe-west12-aiplatform.googleapis.com/" + "location": "europe-west12" }, { - "location": "me-central1", "endpointUrl": "https://me-central1-aiplatform.googleapis.com/", + "location": "me-central1", "description": "Locational Endpoint" }, { + "endpointUrl": "https://me-central2-aiplatform.googleapis.com/", "location": "me-central2", - "description": "Locational Endpoint", - "endpointUrl": "https://me-central2-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { "location": "me-west1", @@ -145,58 +145,58 @@ "endpointUrl": "https://me-west1-aiplatform.googleapis.com/" }, { + "endpointUrl": "https://northamerica-northeast1-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "location": "northamerica-northeast1", - "endpointUrl": "https://northamerica-northeast1-aiplatform.googleapis.com/" + "location": "northamerica-northeast1" }, { + "location": "northamerica-northeast2", "description": "Locational Endpoint", - "endpointUrl": "https://northamerica-northeast2-aiplatform.googleapis.com/", - "location": "northamerica-northeast2" + "endpointUrl": "https://northamerica-northeast2-aiplatform.googleapis.com/" }, { - "location": "southamerica-east1", "description": "Locational Endpoint", + "location": "southamerica-east1", "endpointUrl": "https://southamerica-east1-aiplatform.googleapis.com/" }, { - "location": "southamerica-west1", + 
"endpointUrl": "https://southamerica-west1-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "endpointUrl": "https://southamerica-west1-aiplatform.googleapis.com/" + "location": "southamerica-west1" }, { - "location": "us-central1", "endpointUrl": "https://us-central1-aiplatform.googleapis.com/", + "location": "us-central1", "description": "Locational Endpoint" }, { "description": "Locational Endpoint", - "endpointUrl": "https://us-central2-aiplatform.googleapis.com/", - "location": "us-central2" + "location": "us-central2", + "endpointUrl": "https://us-central2-aiplatform.googleapis.com/" }, { "location": "us-east1", - "description": "Locational Endpoint", - "endpointUrl": "https://us-east1-aiplatform.googleapis.com/" + "endpointUrl": "https://us-east1-aiplatform.googleapis.com/", + "description": "Locational Endpoint" }, { + "endpointUrl": "https://us-east4-aiplatform.googleapis.com/", "location": "us-east4", - "description": "Locational Endpoint", - "endpointUrl": "https://us-east4-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { - "location": "us-south1", "endpointUrl": "https://us-south1-aiplatform.googleapis.com/", - "description": "Locational Endpoint" + "description": "Locational Endpoint", + "location": "us-south1" }, { "endpointUrl": "https://us-west1-aiplatform.googleapis.com/", - "location": "us-west1", - "description": "Locational Endpoint" + "description": "Locational Endpoint", + "location": "us-west1" }, { - "location": "us-west2", "description": "Locational Endpoint", + "location": "us-west2", "endpointUrl": "https://us-west2-aiplatform.googleapis.com/" }, { @@ -205,14 +205,14 @@ "endpointUrl": "https://us-west3-aiplatform.googleapis.com/" }, { - "location": "us-west4", "endpointUrl": "https://us-west4-aiplatform.googleapis.com/", - "description": "Locational Endpoint" + "description": "Locational Endpoint", + "location": "us-west4" }, { + "location": "us-east5", "endpointUrl": 
"https://us-east5-aiplatform.googleapis.com/", - "description": "Locational Endpoint", - "location": "us-east5" + "description": "Locational Endpoint" } ], "fullyEncodeReservedExpansion": true, @@ -16238,23 +16238,9 @@ } } }, - "revision": "20240507", + "revision": "20240510", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { - "CloudAiLargeModelsVisionEmbedVideoResponse": { - "description": "Video embedding response.", - "id": "CloudAiLargeModelsVisionEmbedVideoResponse", - "properties": { - "videoEmbeddings": { - "description": "The embedding vector for the video.", - "items": { - "type": "any" - }, - "type": "array" - } - }, - "type": "object" - }, "CloudAiLargeModelsVisionFilteredText": { "description": "Details for filtered input text.", "id": "CloudAiLargeModelsVisionFilteredText", @@ -16464,17 +16450,6 @@ }, "type": "object" }, - "CloudAiLargeModelsVisionMediaGenerateContentResponse": { - "description": "Generate media content response", - "id": "CloudAiLargeModelsVisionMediaGenerateContentResponse", - "properties": { - "response": { - "$ref": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse", - "description": "Response to the user's request." - } - }, - "type": "object" - }, "CloudAiLargeModelsVisionNamedBoundingBox": { "id": "CloudAiLargeModelsVisionNamedBoundingBox", "properties": { @@ -16537,52 +16512,6 @@ }, "type": "object" }, - "CloudAiLargeModelsVisionReasonVideoResponse": { - "description": "Video reasoning response.", - "id": "CloudAiLargeModelsVisionReasonVideoResponse", - "properties": { - "responses": { - "description": "Generated text responses. 
The generated responses for different segments within the same video.", - "items": { - "$ref": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse" - }, - "type": "array" - } - }, - "type": "object" - }, - "CloudAiLargeModelsVisionReasonVideoResponseTextResponse": { - "description": "Contains text that is the response of the video captioning.", - "id": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse", - "properties": { - "relativeTemporalPartition": { - "$ref": "CloudAiLargeModelsVisionRelativeTemporalPartition", - "description": "Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video." - }, - "text": { - "description": "Text information", - "type": "string" - } - }, - "type": "object" - }, - "CloudAiLargeModelsVisionRelativeTemporalPartition": { - "description": "For ease of use, assume that the start_offset is inclusive and the end_offset is exclusive. In mathematical terms, the partition would be written as [start_offset, end_offset).", - "id": "CloudAiLargeModelsVisionRelativeTemporalPartition", - "properties": { - "endOffset": { - "description": "End time offset of the partition.", - "format": "google-duration", - "type": "string" - }, - "startOffset": { - "description": "Start time offset of the partition.", - "format": "google-duration", - "type": "string" - } - }, - "type": "object" - }, "CloudAiLargeModelsVisionSemanticFilterResponse": { "id": "CloudAiLargeModelsVisionSemanticFilterResponse", "properties": { @@ -16616,1864 +16545,1103 @@ }, "type": "object" }, - "CloudAiNlLlmProtoServiceCandidate": { - "id": "CloudAiNlLlmProtoServiceCandidate", + "GoogleApiHttpBody": { + "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. 
This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", + "id": "GoogleApiHttpBody", "properties": { - "citationMetadata": { - "$ref": "CloudAiNlLlmProtoServiceCitationMetadata", - "description": "Source attribution of the generated content." - }, - "content": { - "$ref": "CloudAiNlLlmProtoServiceContent", - "description": "Content of the candidate." - }, - "finishMessage": { - "description": "A string that describes the filtering behavior in more detail. 
Only filled when reason is set.", + "contentType": { + "description": "The HTTP Content-Type header value specifying the content type of the body.", "type": "string" }, - "finishReason": { - "description": "The reason why the model stopped generating tokens.", - "enum": [ - "FINISH_REASON_UNSPECIFIED", - "FINISH_REASON_STOP", - "FINISH_REASON_MAX_TOKENS", - "FINISH_REASON_SAFETY", - "FINISH_REASON_RECITATION", - "FINISH_REASON_OTHER", - "FINISH_REASON_BLOCKLIST", - "FINISH_REASON_PROHIBITED_CONTENT", - "FINISH_REASON_SPII" - ], - "enumDescriptions": [ - "The finish reason is unspecified.", - "Natural stop point of the model or provided stop sequence.", - "The maximum number of tokens as specified in the request was reached.", - "The token generation was stopped as the response was flagged for safety reasons. NOTE: When streaming the Candidate.content will be empty if content filters blocked the output.", - "The token generation was stopped as the response was flagged for unauthorized citations.", - "All other reasons that stopped the token generation (currently only language filter).", - "The token generation was stopped as the response was flagged for the terms which are included from the terminology blocklist.", - "The token generation was stopped as the response was flagged for the prohibited contents (currently only CSAM).", - "The token generation was stopped as the response was flagged for Sensitive Personally Identifiable Information (SPII) contents." - ], + "data": { + "description": "The HTTP request/response body as raw binary.", + "format": "byte", "type": "string" }, - "groundingMetadata": { - "$ref": "LearningGenaiRootGroundingMetadata", - "description": "Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice." 
- }, - "index": { - "description": "Index of the candidate.", - "format": "int32", - "type": "integer" - }, - "safetyRatings": { - "description": "Safety ratings of the generated content.", + "extensions": { + "description": "Application specific response metadata. Must be set in the first response for streaming APIs.", "items": { - "$ref": "CloudAiNlLlmProtoServiceSafetyRating" + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" }, "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceCitation": { - "description": "Source attributions for content.", - "id": "CloudAiNlLlmProtoServiceCitation", + "GoogleCloudAiplatformV1ActiveLearningConfig": { + "description": "Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy.", + "id": "GoogleCloudAiplatformV1ActiveLearningConfig", "properties": { - "endIndex": { - "description": "End index into the content.", - "format": "int32", - "type": "integer" - }, - "license": { - "description": "License of the attribution.", + "maxDataItemCount": { + "description": "Max number of human labeled DataItems.", + "format": "int64", "type": "string" }, - "publicationDate": { - "$ref": "GoogleTypeDate", - "description": "Publication date of the attribution." - }, - "startIndex": { - "description": "Start index into the content.", + "maxDataItemPercentage": { + "description": "Max percent of total DataItems for human labeling.", "format": "int32", "type": "integer" }, - "title": { - "description": "Title of the attribution.", - "type": "string" + "sampleConfig": { + "$ref": "GoogleCloudAiplatformV1SampleConfig", + "description": "Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy." 
}, - "uri": { - "description": "Url reference of the attribution.", - "type": "string" + "trainingConfig": { + "$ref": "GoogleCloudAiplatformV1TrainingConfig", + "description": "CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems." } }, "type": "object" }, - "CloudAiNlLlmProtoServiceCitationMetadata": { - "description": "A collection of source attributions for a piece of content.", - "id": "CloudAiNlLlmProtoServiceCitationMetadata", + "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest": { + "description": "Request message for MetadataService.AddContextArtifactsAndExecutions.", + "id": "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest", "properties": { - "citations": { - "description": "List of citations.", + "artifacts": { + "description": "The resource names of the Artifacts to attribute to the Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`", + "items": { + "type": "string" + }, + "type": "array" + }, + "executions": { + "description": "The resource names of the Executions to associate with the Context. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`", "items": { - "$ref": "CloudAiNlLlmProtoServiceCitation" + "type": "string" }, "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceContent": { - "description": "The content of a single message from a participant.", - "id": "CloudAiNlLlmProtoServiceContent", + "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse": { + "description": "Response message for MetadataService.AddContextArtifactsAndExecutions.", + "id": "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1AddContextChildrenRequest": { + "description": "Request message for MetadataService.AddContextChildren.", + "id": "GoogleCloudAiplatformV1AddContextChildrenRequest", "properties": { - "isCached": { - "description": "If true, the content is from a cached content.", - "type": "boolean" - }, - "parts": { - "description": "The parts of the message.", + "childContexts": { + "description": "The resource names of the child Contexts.", "items": { - "$ref": "CloudAiNlLlmProtoServicePart" + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1AddContextChildrenResponse": { + "description": "Response message for MetadataService.AddContextChildren.", + "id": "GoogleCloudAiplatformV1AddContextChildrenResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1AddExecutionEventsRequest": { + "description": "Request message for MetadataService.AddExecutionEvents.", + "id": "GoogleCloudAiplatformV1AddExecutionEventsRequest", + "properties": { + "events": { + "description": "The Events to create and add.", + "items": { + "$ref": "GoogleCloudAiplatformV1Event" }, "type": "array" - }, - "role": { - "description": "The role of the current conversation participant.", - "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceFact": { - 
"description": "A condense version of WorldFact (assistant/boq/lamda/factuality/proto/factuality.proto) to propagate the essential information about the fact used in factuality to the upstream caller.", - "id": "CloudAiNlLlmProtoServiceFact", + "GoogleCloudAiplatformV1AddExecutionEventsResponse": { + "description": "Response message for MetadataService.AddExecutionEvents.", + "id": "GoogleCloudAiplatformV1AddExecutionEventsResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1AddTrialMeasurementRequest": { + "description": "Request message for VizierService.AddTrialMeasurement.", + "id": "GoogleCloudAiplatformV1AddTrialMeasurementRequest", "properties": { - "query": { - "description": "Query that is used to retrieve this fact.", + "measurement": { + "$ref": "GoogleCloudAiplatformV1Measurement", + "description": "Required. The measurement to be added to a Trial." + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1Annotation": { + "description": "Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem.", + "id": "GoogleCloudAiplatformV1Annotation", + "properties": { + "annotationSource": { + "$ref": "GoogleCloudAiplatformV1UserActionReference", + "description": "Output only. The source of the Annotation.", + "readOnly": true + }, + "createTime": { + "description": "Output only. Timestamp when this Annotation was created.", + "format": "google-datetime", + "readOnly": true, "type": "string" }, - "summary": { - "description": "If present, the summary/snippet of the fact.", + "etag": { + "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", "type": "string" }, - "title": { - "description": "If present, it refers to the title of this fact.", + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels with user-defined metadata to organize your Annotations. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with \"aiplatform.googleapis.com/\" and are immutable. Following system labels exist for each Annotation: * \"aiplatform.googleapis.com/annotation_set_name\": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * \"aiplatform.googleapis.com/payload_schema\": output only, its value is the payload_schema's title.", + "type": "object" + }, + "name": { + "description": "Output only. Resource name of the Annotation.", + "readOnly": true, + "type": "string" + }, + "payload": { + "description": "Required. The schema of the payload can be found in payload_schema.", + "type": "any" + }, + "payloadSchemaUri": { + "description": "Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata.", "type": "string" }, - "url": { - "description": "If present, this URL links to the webpage of the fact.", + "updateTime": { + "description": "Output only. 
Timestamp when this Annotation was last updated.", + "format": "google-datetime", + "readOnly": true, "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceFunctionCall": { - "description": "Function call details.", - "id": "CloudAiNlLlmProtoServiceFunctionCall", + "GoogleCloudAiplatformV1AnnotationSpec": { + "description": "Identifies a concept with which DataItems may be annotated with.", + "id": "GoogleCloudAiplatformV1AnnotationSpec", "properties": { - "args": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - }, - "description": "The function parameters and values in JSON format.", - "type": "object" + "createTime": { + "description": "Output only. Timestamp when this AnnotationSpec was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "displayName": { + "description": "Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters.", + "type": "string" + }, + "etag": { + "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "type": "string" }, "name": { - "description": "Required. The name of the function to call.", + "description": "Output only. Resource name of the AnnotationSpec.", + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "Output only. Timestamp when AnnotationSpec was last updated.", + "format": "google-datetime", + "readOnly": true, "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceFunctionResponse": { - "description": "Function response details.", - "id": "CloudAiNlLlmProtoServiceFunctionResponse", + "GoogleCloudAiplatformV1Artifact": { + "description": "Instance of a general artifact.", + "id": "GoogleCloudAiplatformV1Artifact", "properties": { - "name": { - "description": "Required. 
The name of the function to call.", + "createTime": { + "description": "Output only. Timestamp when this Artifact was created.", + "format": "google-datetime", + "readOnly": true, "type": "string" }, - "response": { + "description": { + "description": "Description of the Artifact", + "type": "string" + }, + "displayName": { + "description": "User provided display name of the Artifact. May be up to 128 Unicode characters.", + "type": "string" + }, + "etag": { + "description": "An eTag used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded).", + "type": "object" + }, + "metadata": { "additionalProperties": { "description": "Properties of the object.", "type": "any" }, - "description": "Required. The function response in JSON object format.", + "description": "Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB.", "type": "object" - } - }, - "type": "object" - }, - "CloudAiNlLlmProtoServiceGenerateMultiModalResponse": { - "id": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse", - "properties": { - "candidates": { - "description": "Possible candidate responses to the conversation up until this point.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceCandidate" - }, - "type": "array" }, - "debugMetadata": { - "$ref": "CloudAiNlLlmProtoServiceMessageMetadata", - "description": "Debug information containing message metadata. 
Clients should not consume this field, and this is only populated for Flow Runner path." + "name": { + "description": "Output only. The resource name of the Artifact.", + "readOnly": true, + "type": "string" + }, + "schemaTitle": { + "description": "The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", + "type": "string" }, - "facts": { - "description": "External facts retrieved for factuality/grounding.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceFact" - }, - "type": "array" + "schemaVersion": { + "description": "The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", + "type": "string" }, - "promptFeedback": { - "$ref": "CloudAiNlLlmProtoServicePromptFeedback", - "description": "Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations." + "state": { + "description": "The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions.", + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "LIVE" + ], + "enumDescriptions": [ + "Unspecified state for the Artifact.", + "A state used by systems like Vertex AI Pipelines to indicate that the underlying data item represented by this Artifact is being created.", + "A state indicating that the Artifact should exist, unless something external to the system deletes it." 
+ ], + "type": "string" }, - "reportingMetrics": { - "$ref": "IntelligenceCloudAutomlXpsReportingMetrics", - "description": "Billable prediction metrics." + "updateTime": { + "description": "Output only. Timestamp when this Artifact was last updated.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "usageMetadata": { - "$ref": "CloudAiNlLlmProtoServiceUsageMetadata", - "description": "Usage metadata about the response(s)." + "uri": { + "description": "The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file.", + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceMessageMetadata": { - "id": "CloudAiNlLlmProtoServiceMessageMetadata", + "GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata": { + "description": "Metadata information for NotebookService.AssignNotebookRuntime.", + "id": "GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata", "properties": { - "factualityDebugMetadata": { - "$ref": "LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata", - "description": "Factuality-related debug metadata." + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." }, - "inputFilterInfo": { - "$ref": "LearningServingLlmMessageMetadata", - "description": "Filter metadata of the input messages." + "progressMessage": { + "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1AssignNotebookRuntimeRequest": { + "description": "Request message for NotebookService.AssignNotebookRuntime.", + "id": "GoogleCloudAiplatformV1AssignNotebookRuntimeRequest", + "properties": { + "notebookRuntime": { + "$ref": "GoogleCloudAiplatformV1NotebookRuntime", + "description": "Required. Provide runtime specific information (e.g. 
runtime owner, notebook id) used for NotebookRuntime assignment." }, - "modelRoutingDecision": { - "$ref": "LearningGenaiRootRoutingDecision", - "description": "This score is generated by the router model to decide which model to use" + "notebookRuntimeId": { + "description": "Optional. User specified ID for the notebook runtime.", + "type": "string" }, - "outputFilterInfo": { - "description": "Filter metadata of the output messages.", - "items": { - "$ref": "LearningServingLlmMessageMetadata" - }, - "type": "array" + "notebookRuntimeTemplate": { + "description": "Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one).", + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePart": { - "description": "A single part of a message.", - "id": "CloudAiNlLlmProtoServicePart", + "GoogleCloudAiplatformV1Attribution": { + "description": "Attribution that explains a particular prediction output.", + "id": "GoogleCloudAiplatformV1Attribution", "properties": { - "documentMetadata": { - "$ref": "CloudAiNlLlmProtoServicePartDocumentMetadata", - "description": "Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part." + "approximationError": { + "description": "Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. 
See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.", + "format": "double", + "readOnly": true, + "type": "number" }, - "fileData": { - "$ref": "CloudAiNlLlmProtoServicePartFileData", - "description": "URI-based data." + "baselineOutputValue": { + "description": "Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.", + "format": "double", + "readOnly": true, + "type": "number" }, - "functionCall": { - "$ref": "CloudAiNlLlmProtoServiceFunctionCall", - "description": "Function call data." + "featureAttributions": { + "description": "Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. 
The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).", + "readOnly": true, + "type": "any" }, - "functionResponse": { - "$ref": "CloudAiNlLlmProtoServiceFunctionResponse", - "description": "Function response data." + "instanceOutputValue": { + "description": "Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.", + "format": "double", + "readOnly": true, + "type": "number" }, - "inlineData": { - "$ref": "CloudAiNlLlmProtoServicePartBlob", - "description": "Inline bytes data" + "outputDisplayName": { + "description": "Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index.", + "readOnly": true, + "type": "string" }, - "lmRootMetadata": { - "$ref": "CloudAiNlLlmProtoServicePartLMRootMetadata", - "description": "Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields." + "outputIndex": { + "description": "Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. 
Indices start from 0.", + "items": { + "format": "int32", + "type": "integer" + }, + "readOnly": true, + "type": "array" }, - "text": { - "description": "Text input.", + "outputName": { + "description": "Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.", + "readOnly": true, "type": "string" - }, - "videoMetadata": { - "$ref": "CloudAiNlLlmProtoServicePartVideoMetadata", - "description": "Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data." } }, "type": "object" }, - "CloudAiNlLlmProtoServicePartBlob": { - "description": "Represents arbitrary blob data input.", - "id": "CloudAiNlLlmProtoServicePartBlob", + "GoogleCloudAiplatformV1AutomaticResources": { + "description": "A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines.", + "id": "GoogleCloudAiplatformV1AutomaticResources", "properties": { - "data": { - "description": "Inline data.", - "format": "byte", - "type": "string" - }, - "mimeType": { - "description": "The mime type corresponding to this input.", - "type": "string" + "maxReplicaCount": { + "description": "Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. 
If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.", + "format": "int32", + "type": "integer" }, - "originalFileData": { - "$ref": "CloudAiNlLlmProtoServicePartFileData", - "description": "Original file data where the blob comes from." + "minReplicaCount": { + "description": "Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePartDocumentMetadata": { - "description": "Metadata describes the original input document content.", - "id": "CloudAiNlLlmProtoServicePartDocumentMetadata", + "GoogleCloudAiplatformV1AutoscalingMetricSpec": { + "description": "The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count.", + "id": "GoogleCloudAiplatformV1AutoscalingMetricSpec", "properties": { - "originalDocumentBlob": { - "$ref": "CloudAiNlLlmProtoServicePartBlob", - "description": "The original document blob." + "metricName": { + "description": "Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`", + "type": "string" }, - "pageNumber": { - "description": "The (1-indexed) page number of the image in the original document. 
The first page carries the original document content and mime type.", + "target": { + "description": "The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.", "format": "int32", "type": "integer" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePartFileData": { - "description": "Represents file data.", - "id": "CloudAiNlLlmProtoServicePartFileData", + "GoogleCloudAiplatformV1AvroSource": { + "description": "The storage details for Avro input content.", + "id": "GoogleCloudAiplatformV1AvroSource", "properties": { - "fileUri": { - "description": "Inline data.", - "type": "string" - }, - "mimeType": { - "description": "The mime type corresponding to this input.", - "type": "string" + "gcsSource": { + "$ref": "GoogleCloudAiplatformV1GcsSource", + "description": "Required. Google Cloud Storage location." } }, "type": "object" }, - "CloudAiNlLlmProtoServicePartLMRootMetadata": { - "description": "Metadata provides extra info for building the LM Root request.", - "id": "CloudAiNlLlmProtoServicePartLMRootMetadata", + "GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest": { + "description": "Request message for PipelineService.BatchCancelPipelineJobs.", + "id": "GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest", "properties": { - "chunkId": { - "description": "Chunk id that will be used when mapping the part to the LM Root's chunk.", - "type": "string" + "names": { + "description": "Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. 
Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePartVideoMetadata": { - "description": "Metadata describes the input video content.", - "id": "CloudAiNlLlmProtoServicePartVideoMetadata", + "GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata": { + "description": "Details of operations that perform batch create Features.", + "id": "GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata", "properties": { - "endOffset": { - "description": "The end offset of the video.", - "format": "google-duration", - "type": "string" - }, - "startOffset": { - "description": "The start offset of the video.", - "format": "google-duration", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for Feature." } }, "type": "object" }, - "CloudAiNlLlmProtoServicePromptFeedback": { - "description": "Content filter results for a prompt sent in the request.", - "id": "CloudAiNlLlmProtoServicePromptFeedback", + "GoogleCloudAiplatformV1BatchCreateFeaturesRequest": { + "description": "Request message for FeaturestoreService.BatchCreateFeatures.", + "id": "GoogleCloudAiplatformV1BatchCreateFeaturesRequest", "properties": { - "blockReason": { - "description": "Blocked reason.", - "enum": [ - "BLOCKED_REASON_UNSPECIFIED", - "SAFETY", - "OTHER", - "BLOCKLIST", - "PROHIBITED_CONTENT" - ], - "enumDescriptions": [ - "Unspecified blocked reason.", - "Candidates blocked due to safety.", - "Candidates blocked due to other reason (currently only language filter).", - "Candidates blocked due to the terms which are included from the terminology blocklist.", - "Candidates blocked due to prohibited content (currently only CSAM)." 
- ], - "type": "string" - }, - "blockReasonMessage": { - "description": "A readable block reason message.", - "type": "string" - }, - "safetyRatings": { - "description": "Safety ratings.", + "requests": { + "description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.", "items": { - "$ref": "CloudAiNlLlmProtoServiceSafetyRating" + "$ref": "GoogleCloudAiplatformV1CreateFeatureRequest" }, "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceRaiResult": { - "description": "The RAI results for a given text. Next ID: 12", - "id": "CloudAiNlLlmProtoServiceRaiResult", + "GoogleCloudAiplatformV1BatchCreateFeaturesResponse": { + "description": "Response message for FeaturestoreService.BatchCreateFeatures.", + "id": "GoogleCloudAiplatformV1BatchCreateFeaturesResponse", "properties": { - "aidaRecitationResult": { - "$ref": "LanguageLabsAidaTrustRecitationProtoRecitationResult", - "description": "Recitation result from Aida recitation checker." - }, - "blocked": { - "deprecated": true, - "description": "Use `triggered_blocklist`.", - "type": "boolean" - }, - "errorCodes": { - "description": "The error codes indicate which RAI filters block the response.", - "items": { - "format": "int32", - "type": "integer" - }, - "type": "array" - }, - "filtered": { - "description": "Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`.", - "type": "boolean" - }, - "languageFilterResult": { - "$ref": "LearningGenaiRootLanguageFilterResult", - "description": "Language filter result from SAFT LangId." 
- }, - "mmRecitationResult": { - "$ref": "LearningGenaiRecitationMMRecitationCheckResult", - "description": "Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked." - }, - "raiSignals": { - "description": "The RAI signals for the text.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceRaiSignal" - }, - "type": "array" - }, - "translationRequestInfos": { - "description": "Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server.", + "features": { + "description": "The Features created.", "items": { - "$ref": "LearningGenaiRootTranslationRequestInfo" + "$ref": "GoogleCloudAiplatformV1Feature" }, "type": "array" - }, - "triggeredBlocklist": { - "description": "Whether the text triggered the blocklist.", - "type": "boolean" - }, - "triggeredRecitation": { - "description": "Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result.", - "type": "boolean" - }, - "triggeredSafetyFilter": { - "description": "Whether the text triggered the safety filter. 
Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold.", - "type": "boolean" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceRaiSignal": { - "description": "An RAI signal for a single category.", - "id": "CloudAiNlLlmProtoServiceRaiSignal", + "GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest": { + "description": "Request message for TensorboardService.BatchCreateTensorboardRuns.", + "id": "GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest", "properties": { - "confidence": { - "description": "The confidence level for the RAI category.", - "enum": [ - "CONFIDENCE_UNSPECIFIED", - "CONFIDENCE_NONE", - "CONFIDENCE_LOW", - "CONFIDENCE_MEDIUM", - "CONFIDENCE_HIGH" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "flagged": { - "description": "Whether the category is flagged as being present. Currently, this is set to true if score >= 0.5.", - "type": "boolean" - }, - "influentialTerms": { - "description": "The influential terms that could potentially block the response.", + "requests": { + "description": "Required. The request message specifying the TensorboardRuns to create. 
A maximum of 1000 TensorboardRuns can be created in a batch.", "items": { - "$ref": "CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm" + "$ref": "GoogleCloudAiplatformV1CreateTensorboardRunRequest" }, "type": "array" - }, - "raiCategory": { - "description": "The RAI category.", - "enum": [ - "RAI_CATEGORY_UNSPECIFIED", - "TOXIC", - "SEXUALLY_EXPLICIT", - "HATE_SPEECH", - "VIOLENT", - "PROFANITY", - "HARASSMENT", - "DEATH_HARM_TRAGEDY", - "FIREARMS_WEAPONS", - "PUBLIC_SAFETY", - "HEALTH", - "RELIGIOUS_BELIEF", - "ILLICIT_DRUGS", - "WAR_CONFLICT", - "POLITICS", - "FINANCE", - "LEGAL", - "CSAI", - "FRINGE", - "THREAT", - "SEVERE_TOXICITY", - "TOXICITY", - "SEXUAL", - "INSULT", - "DEROGATORY", - "IDENTITY_ATTACK", - "VIOLENCE_ABUSE", - "OBSCENE", - "DRUGS", - "CSAM", - "SPII", - "DANGEROUS_CONTENT", - "DANGEROUS_CONTENT_SEVERITY", - "INSULT_SEVERITY", - "DEROGATORY_SEVERITY", - "SEXUAL_SEVERITY" - ], - "enumDescriptions": [ - "", - "SafetyCat categories.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "GRAIL categories that can't be exposed to end users.", - "", - "Unused categories.", - "", - "Old category names.", - "", - "", - "", - "", - "", - "", - "", - "CSAM V2", - "SPII", - "New SafetyCat v3 categories", - "", - "", - "", - "" - ], - "type": "string" - }, - "score": { - "description": "The score for the category, in the range [0.0, 1.0].", - "format": "float", - "type": "number" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm": { - "description": "The influential term that could potentially block the response.", - "id": "CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm", + "GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse": { + "description": "Response message for TensorboardService.BatchCreateTensorboardRuns.", + "id": "GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse", "properties": { - "beginOffset": { - "description": "The beginning offset of the influential term.", 
- "format": "int32", - "type": "integer" - }, - "confidence": { - "description": "The confidence score of the influential term.", - "format": "float", - "type": "number" - }, - "source": { - "description": "The source of the influential term, prompt or response.", - "enum": [ - "SOURCE_UNSPECIFIED", - "PROMPT", - "RESPONSE" - ], - "enumDescriptions": [ - "Unspecified source.", - "The influential term comes from the prompt.", - "The influential term comes from the response." - ], - "type": "string" - }, - "term": { - "description": "The influential term.", - "type": "string" + "tensorboardRuns": { + "description": "The created TensorboardRuns.", + "items": { + "$ref": "GoogleCloudAiplatformV1TensorboardRun" + }, + "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceSafetyRating": { - "description": "Safety rating corresponding to the generated content.", - "id": "CloudAiNlLlmProtoServiceSafetyRating", + "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest": { + "description": "Request message for TensorboardService.BatchCreateTensorboardTimeSeries.", + "id": "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest", "properties": { - "blocked": { - "description": "Indicates whether the content was filtered out because of this rating.", - "type": "boolean" - }, - "category": { - "description": "Harm category.", - "enum": [ - "HARM_CATEGORY_UNSPECIFIED", - "HARM_CATEGORY_HATE_SPEECH", - "HARM_CATEGORY_DANGEROUS_CONTENT", - "HARM_CATEGORY_HARASSMENT", - "HARM_CATEGORY_SEXUALLY_EXPLICIT" - ], - "enumDescriptions": [ - "The harm category is unspecified.", - "The harm category is hate speech.", - "The harm category is dengerous content.", - "The harm category is harassment.", - "The harm category is sexually explicit." - ], - "type": "string" - }, - "influentialTerms": { - "description": "The influential terms that could potentially block the response.", + "requests": { + "description": "Required. 
The request message specifying the TensorboardTimeSeries to create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.", "items": { - "$ref": "CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm" + "$ref": "GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest" }, "type": "array" - }, - "probability": { - "description": "Harm probability levels in the content.", - "enum": [ - "HARM_PROBABILITY_UNSPECIFIED", - "NEGLIGIBLE", - "LOW", - "MEDIUM", - "HIGH" - ], - "enumDescriptions": [ - "Harm probability unspecified.", - "Negligible level of harm.", - "Low level of harm.", - "Medium level of harm.", - "High level of harm." - ], - "type": "string" - }, - "probabilityScore": { - "description": "Harm probability score.", - "format": "float", - "type": "number" - }, - "severity": { - "description": "Harm severity levels in the content.", - "enum": [ - "HARM_SEVERITY_UNSPECIFIED", - "HARM_SEVERITY_NEGLIGIBLE", - "HARM_SEVERITY_LOW", - "HARM_SEVERITY_MEDIUM", - "HARM_SEVERITY_HIGH" - ], - "enumDescriptions": [ - "Harm severity unspecified.", - "Negligible level of harm severity.", - "Low level of harm severity.", - "Medium level of harm severity.", - "High level of harm severity." 
- ], - "type": "string" - }, - "severityScore": { - "description": "Harm severity score.", - "format": "float", - "type": "number" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm": { - "description": "The influential term that could potentially block the response.", - "id": "CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm", + "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse": { + "description": "Response message for TensorboardService.BatchCreateTensorboardTimeSeries.", + "id": "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse", "properties": { - "beginOffset": { - "description": "The beginning offset of the influential term.", - "format": "int32", - "type": "integer" - }, - "confidence": { - "description": "The confidence score of the influential term.", - "format": "float", - "type": "number" - }, - "source": { - "description": "The source of the influential term, prompt or response.", - "enum": [ - "SOURCE_UNSPECIFIED", - "PROMPT", - "RESPONSE" - ], - "enumDescriptions": [ - "Unspecified source.", - "The influential term comes from the prompt.", - "The influential term comes from the response." 
- ], - "type": "string" - }, - "term": { - "description": "The influential term.", - "type": "string" + "tensorboardTimeSeries": { + "description": "The created TensorboardTimeSeries.", + "items": { + "$ref": "GoogleCloudAiplatformV1TensorboardTimeSeries" + }, + "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceUsageMetadata": { - "description": "Usage metadata about response(s).", - "id": "CloudAiNlLlmProtoServiceUsageMetadata", + "GoogleCloudAiplatformV1BatchDedicatedResources": { + "description": "A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration.", + "id": "GoogleCloudAiplatformV1BatchDedicatedResources", "properties": { - "candidatesTokenCount": { - "description": "Number of tokens in the response(s).", - "format": "int32", - "type": "integer" + "machineSpec": { + "$ref": "GoogleCloudAiplatformV1MachineSpec", + "description": "Required. Immutable. The specification of a single machine." }, - "promptTokenCount": { - "description": "Number of tokens in the request.", + "maxReplicaCount": { + "description": "Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10.", "format": "int32", "type": "integer" }, - "totalTokenCount": { + "startingReplicaCount": { + "description": "Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count", "format": "int32", "type": "integer" } }, "type": "object" }, - "GoogleApiHttpBody": { - "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. 
It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", - "id": "GoogleApiHttpBody", + "GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest": { + "description": "Request message for PipelineService.BatchDeletePipelineJobs.", + "id": "GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest", "properties": { - "contentType": { - "description": "The HTTP Content-Type header value specifying the content type of the body.", - "type": "string" - }, - "data": { - "description": "The HTTP request/response body as raw binary.", - "format": "byte", - "type": "string" - }, - "extensions": { - "description": "Application specific response metadata. Must be set in the first response for streaming APIs.", + "names": { + "description": "Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", "items": { - "additionalProperties": { - "description": "Properties of the object. 
Contains field @type with type URL.", - "type": "any" - }, - "type": "object" + "type": "string" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1ActiveLearningConfig": { - "description": "Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy.", - "id": "GoogleCloudAiplatformV1ActiveLearningConfig", + "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest": { + "description": "Request message for ModelService.BatchImportEvaluatedAnnotations", + "id": "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest", "properties": { - "maxDataItemCount": { - "description": "Max number of human labeled DataItems.", - "format": "int64", - "type": "string" - }, - "maxDataItemPercentage": { - "description": "Max percent of total DataItems for human labeling.", + "evaluatedAnnotations": { + "description": "Required. Evaluated annotations resource to be imported.", + "items": { + "$ref": "GoogleCloudAiplatformV1EvaluatedAnnotation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse": { + "description": "Response message for ModelService.BatchImportEvaluatedAnnotations", + "id": "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse", + "properties": { + "importedEvaluatedAnnotationsCount": { + "description": "Output only. Number of EvaluatedAnnotations imported.", "format": "int32", + "readOnly": true, "type": "integer" - }, - "sampleConfig": { - "$ref": "GoogleCloudAiplatformV1SampleConfig", - "description": "Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy." - }, - "trainingConfig": { - "$ref": "GoogleCloudAiplatformV1TrainingConfig", - "description": "CMLE training config. 
For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems." } }, "type": "object" }, - "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest": { - "description": "Request message for MetadataService.AddContextArtifactsAndExecutions.", - "id": "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest", + "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest": { + "description": "Request message for ModelService.BatchImportModelEvaluationSlices", + "id": "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest", "properties": { - "artifacts": { - "description": "The resource names of the Artifacts to attribute to the Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`", + "modelEvaluationSlices": { + "description": "Required. Model evaluation slice resource to be imported.", "items": { - "type": "string" + "$ref": "GoogleCloudAiplatformV1ModelEvaluationSlice" }, "type": "array" - }, - "executions": { - "description": "The resource names of the Executions to associate with the Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`", + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse": { + "description": "Response message for ModelService.BatchImportModelEvaluationSlices", + "id": "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse", + "properties": { + "importedModelEvaluationSlices": { + "description": "Output only. 
List of imported ModelEvaluationSlice.name.", "items": { "type": "string" }, + "readOnly": true, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse": { - "description": "Response message for MetadataService.AddContextArtifactsAndExecutions.", - "id": "GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1AddContextChildrenRequest": { - "description": "Request message for MetadataService.AddContextChildren.", - "id": "GoogleCloudAiplatformV1AddContextChildrenRequest", + "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata": { + "description": "Runtime operation information for MigrationService.BatchMigrateResources.", + "id": "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata", "properties": { - "childContexts": { - "description": "The resource names of the child Contexts.", + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The common part of the operation metadata." 
+ }, + "partialResults": { + "description": "Partial results that reflect the latest migration operation progress.", "items": { - "type": "string" + "$ref": "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1AddContextChildrenResponse": { - "description": "Response message for MetadataService.AddContextChildren.", - "id": "GoogleCloudAiplatformV1AddContextChildrenResponse", - "properties": {}, + "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult": { + "description": "Represents a partial result in batch migration operation for one MigrateResourceRequest.", + "id": "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult", + "properties": { + "dataset": { + "description": "Migrated dataset resource name.", + "type": "string" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the migration request in case of failure." + }, + "model": { + "description": "Migrated model resource name.", + "type": "string" + }, + "request": { + "$ref": "GoogleCloudAiplatformV1MigrateResourceRequest", + "description": "It's the same as the value in MigrateResourceRequest.migrate_resource_requests." + } + }, "type": "object" }, - "GoogleCloudAiplatformV1AddExecutionEventsRequest": { - "description": "Request message for MetadataService.AddExecutionEvents.", - "id": "GoogleCloudAiplatformV1AddExecutionEventsRequest", + "GoogleCloudAiplatformV1BatchMigrateResourcesRequest": { + "description": "Request message for MigrationService.BatchMigrateResources.", + "id": "GoogleCloudAiplatformV1BatchMigrateResourcesRequest", "properties": { - "events": { - "description": "The Events to create and add.", + "migrateResourceRequests": { + "description": "Required. The request messages specifying the resources to migrate. They must be in the same location as the destination. 
Up to 50 resources can be migrated in one batch.", "items": { - "$ref": "GoogleCloudAiplatformV1Event" + "$ref": "GoogleCloudAiplatformV1MigrateResourceRequest" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1AddExecutionEventsResponse": { - "description": "Response message for MetadataService.AddExecutionEvents.", - "id": "GoogleCloudAiplatformV1AddExecutionEventsResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1AddTrialMeasurementRequest": { - "description": "Request message for VizierService.AddTrialMeasurement.", - "id": "GoogleCloudAiplatformV1AddTrialMeasurementRequest", + "GoogleCloudAiplatformV1BatchMigrateResourcesResponse": { + "description": "Response message for MigrationService.BatchMigrateResources.", + "id": "GoogleCloudAiplatformV1BatchMigrateResourcesResponse", "properties": { - "measurement": { - "$ref": "GoogleCloudAiplatformV1Measurement", - "description": "Required. The measurement to be added to a Trial." + "migrateResourceResponses": { + "description": "Successfully migrated resources.", + "items": { + "$ref": "GoogleCloudAiplatformV1MigrateResourceResponse" + }, + "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1Annotation": { - "description": "Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem.", - "id": "GoogleCloudAiplatformV1Annotation", + "GoogleCloudAiplatformV1BatchPredictionJob": { + "description": "A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances.", + "id": "GoogleCloudAiplatformV1BatchPredictionJob", "properties": { - "annotationSource": { - "$ref": "GoogleCloudAiplatformV1UserActionReference", - "description": "Output only. 
The source of the Annotation.", + "completionStats": { + "$ref": "GoogleCloudAiplatformV1CompletionStats", + "description": "Output only. Statistics on completed and failed prediction instances.", "readOnly": true }, "createTime": { - "description": "Output only. Timestamp when this Annotation was created.", + "description": "Output only. Time when the BatchPredictionJob was created.", "format": "google-datetime", "readOnly": true, "type": "string" }, - "etag": { - "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "dedicatedResources": { + "$ref": "GoogleCloudAiplatformV1BatchDedicatedResources", + "description": "The config of resources used by the Model during the batch prediction. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided." + }, + "disableContainerLogging": { + "description": "For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true.", + "type": "boolean" + }, + "displayName": { + "description": "Required. The user-defined name of this BatchPredictionJob.", + "type": "string" + }, + "encryptionSpec": { + "$ref": "GoogleCloudAiplatformV1EncryptionSpec", + "description": "Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key." + }, + "endTime": { + "description": "Output only. 
Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.", + "format": "google-datetime", + "readOnly": true, "type": "string" }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED.", + "readOnly": true + }, + "explanationSpec": { + "$ref": "GoogleCloudAiplatformV1ExplanationSpec", + "description": "Explanation configuration for this BatchPredictionJob. Can be specified only if generate_explanation is set to `true`. This value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of the explanation_spec object is not populated, the corresponding field of the Model.explanation_spec object is inherited." + }, + "generateExplanation": { + "description": "Generate explanation with the batch prediction results. When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated.", + "type": "boolean" + }, + "inputConfig": { + "$ref": "GoogleCloudAiplatformV1BatchPredictionJobInputConfig", + "description": "Required. Input configuration of the instances on which predictions are performed. The schema of any single instance may be specified via the Model's PredictSchemata's instance_schema_uri." 
+ }, + "instanceConfig": { + "$ref": "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig", + "description": "Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model." + }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with \"aiplatform.googleapis.com/\" and are immutable. Following system labels exist for each Annotation: * \"aiplatform.googleapis.com/annotation_set_name\": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * \"aiplatform.googleapis.com/payload_schema\": output only, its value is the payload_schema's title.", + "description": "The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.", "type": "object" }, - "name": { - "description": "Output only. Resource name of the Annotation.", - "readOnly": true, + "manualBatchTuningParameters": { + "$ref": "GoogleCloudAiplatformV1ManualBatchTuningParameters", + "description": "Immutable. Parameters configuring the batch behavior. Currently only applicable when dedicated_resources are used (in other cases Vertex AI does the tuning itself)." 
+ }, + "model": { + "description": "The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher}/models/{model}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`", "type": "string" }, - "payload": { - "description": "Required. The schema of the payload can be found in payload_schema.", + "modelParameters": { + "description": "The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri.", "type": "any" }, - "payloadSchemaUri": { - "description": "Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata.", - "type": "string" - }, - "updateTime": { - "description": "Output only. 
Timestamp when this Annotation was last updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1AnnotationSpec": { - "description": "Identifies a concept with which DataItems may be annotated with.", - "id": "GoogleCloudAiplatformV1AnnotationSpec", - "properties": { - "createTime": { - "description": "Output only. Timestamp when this AnnotationSpec was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "displayName": { - "description": "Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters.", - "type": "string" - }, - "etag": { - "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "modelVersionId": { + "description": "Output only. The version ID of the Model that produces the predictions via this job.", + "readOnly": true, "type": "string" }, "name": { - "description": "Output only. Resource name of the AnnotationSpec.", - "readOnly": true, - "type": "string" - }, - "updateTime": { - "description": "Output only. Timestamp when AnnotationSpec was last updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1Artifact": { - "description": "Instance of a general artifact.", - "id": "GoogleCloudAiplatformV1Artifact", - "properties": { - "createTime": { - "description": "Output only. Timestamp when this Artifact was created.", - "format": "google-datetime", + "description": "Output only. Resource name of the BatchPredictionJob.", "readOnly": true, "type": "string" }, - "description": { - "description": "Description of the Artifact", - "type": "string" - }, - "displayName": { - "description": "User provided display name of the Artifact. 
May be up to 128 Unicode characters.", - "type": "string" - }, - "etag": { - "description": "An eTag used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", - "type": "string" + "outputConfig": { + "$ref": "GoogleCloudAiplatformV1BatchPredictionJobOutputConfig", + "description": "Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of Model's PredictSchemata's instance_schema_uri and prediction_schema_uri." }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded).", - "type": "object" + "outputInfo": { + "$ref": "GoogleCloudAiplatformV1BatchPredictionJobOutputInfo", + "description": "Output only. Information further describing the output of this job.", + "readOnly": true }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" + "partialFailures": { + "description": "Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details.", + "items": { + "$ref": "GoogleRpcStatus" }, - "description": "Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB.", - "type": "object" - }, - "name": { - "description": "Output only. 
The resource name of the Artifact.", "readOnly": true, - "type": "string" + "type": "array" }, - "schemaTitle": { - "description": "The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", + "resourcesConsumed": { + "$ref": "GoogleCloudAiplatformV1ResourcesConsumed", + "description": "Output only. Information about resources that had been consumed by this job. Provided in real time at best effort basis, as well as a final value once the job completes. Note: This field currently may be not populated for batch predictions that use AutoML Models.", + "readOnly": true + }, + "serviceAccount": { + "description": "The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account.", "type": "string" }, - "schemaVersion": { - "description": "The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", + "startTime": { + "description": "Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state.", + "format": "google-datetime", + "readOnly": true, "type": "string" }, "state": { - "description": "The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions.", + "description": "Output only. 
The detailed state of the job.", "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "LIVE" + "JOB_STATE_UNSPECIFIED", + "JOB_STATE_QUEUED", + "JOB_STATE_PENDING", + "JOB_STATE_RUNNING", + "JOB_STATE_SUCCEEDED", + "JOB_STATE_FAILED", + "JOB_STATE_CANCELLING", + "JOB_STATE_CANCELLED", + "JOB_STATE_PAUSED", + "JOB_STATE_EXPIRED", + "JOB_STATE_UPDATING", + "JOB_STATE_PARTIALLY_SUCCEEDED" ], "enumDescriptions": [ - "Unspecified state for the Artifact.", - "A state used by systems like Vertex AI Pipelines to indicate that the underlying data item represented by this Artifact is being created.", - "A state indicating that the Artifact should exist, unless something external to the system deletes it." + "The job state is unspecified.", + "The job has been just created or resumed and processing has not yet begun.", + "The service is preparing to run the job.", + "The job is in progress.", + "The job completed successfully.", + "The job failed.", + "The job is being cancelled. From this state the job may only go to either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.", + "The job has been cancelled.", + "The job has been stopped, and can be resumed.", + "The job has expired.", + "The job is being updated. Only jobs in the `RUNNING` state can be updated. After updating, the job goes back to the `RUNNING` state.", + "The job is partially succeeded, some results may be missing due to errors." ], + "readOnly": true, "type": "string" }, + "unmanagedContainerModel": { + "$ref": "GoogleCloudAiplatformV1UnmanagedContainerModel", + "description": "Contains model information necessary to perform batch prediction without requiring uploading to model registry. Exactly one of model and unmanaged_container_model must be set." + }, "updateTime": { - "description": "Output only. Timestamp when this Artifact was last updated.", + "description": "Output only. 
Time when the BatchPredictionJob was most recently updated.", "format": "google-datetime", "readOnly": true, "type": "string" - }, - "uri": { - "description": "The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata": { - "description": "Metadata information for NotebookService.AssignNotebookRuntime.", - "id": "GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - }, - "progressMessage": { - "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", - "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1AssignNotebookRuntimeRequest": { - "description": "Request message for NotebookService.AssignNotebookRuntime.", - "id": "GoogleCloudAiplatformV1AssignNotebookRuntimeRequest", + "GoogleCloudAiplatformV1BatchPredictionJobInputConfig": { + "description": "Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them.", + "id": "GoogleCloudAiplatformV1BatchPredictionJobInputConfig", "properties": { - "notebookRuntime": { - "$ref": "GoogleCloudAiplatformV1NotebookRuntime", - "description": "Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment." + "bigquerySource": { + "$ref": "GoogleCloudAiplatformV1BigQuerySource", + "description": "The BigQuery location of the input table. The schema of the table should be in the format described by the given context OpenAPI Schema, if one is provided. The table may contain additional columns that are not described by the schema, and they will be ignored." 
}, - "notebookRuntimeId": { - "description": "Optional. User specified ID for the notebook runtime.", - "type": "string" + "gcsSource": { + "$ref": "GoogleCloudAiplatformV1GcsSource", + "description": "The Cloud Storage location for the input instances." }, - "notebookRuntimeTemplate": { - "description": "Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one).", + "instancesFormat": { + "description": "Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats.", "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1Attribution": { - "description": "Attribution that explains a particular prediction output.", - "id": "GoogleCloudAiplatformV1Attribution", + "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig": { + "description": "Configuration defining how to transform batch prediction input instances to the instances that the Model accepts.", + "id": "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig", "properties": { - "approximationError": { - "description": "Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.", - "format": "double", - "readOnly": true, - "type": "number" - }, - "baselineOutputValue": { - "description": "Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. 
If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.", - "format": "double", - "readOnly": true, - "type": "number" - }, - "featureAttributions": { - "description": "Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).", - "readOnly": true, - "type": "any" - }, - "instanceOutputValue": { - "description": "Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.", - "format": "double", - "readOnly": true, - "type": "number" - }, - "outputDisplayName": { - "description": "Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. 
This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index.", - "readOnly": true, - "type": "string" + "excludedFields": { + "description": "Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.", + "items": { + "type": "string" + }, + "type": "array" }, - "outputIndex": { - "description": "Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.", + "includedFields": { + "description": "Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.", "items": { - "format": "int32", - "type": "integer" + "type": "string" }, - "readOnly": true, "type": "array" }, - "outputName": { - "description": "Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.", - "readOnly": true, + "instanceType": { + "description": "The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. 
Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{\"b64\": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{\"b64\": }`, where `` is the Base64-encoded string of the content of the file.", + "type": "string" + }, + "keyField": { + "description": "The name of the field that is considered as a key. The values identified by the key field is not included in the transformed instances that is sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. 
* For `csv`/`bigquery` output format, the output will have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.", "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1AutomaticResources": { - "description": "A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines.", - "id": "GoogleCloudAiplatformV1AutomaticResources", + "GoogleCloudAiplatformV1BatchPredictionJobOutputConfig": { + "description": "Configures the output of BatchPredictionJob. See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them.", + "id": "GoogleCloudAiplatformV1BatchPredictionJobOutputConfig", "properties": { - "maxReplicaCount": { - "description": "Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.", - "format": "int32", - "type": "integer" + "bigqueryDestination": { + "$ref": "GoogleCloudAiplatformV1BigQueryDestination", + "description": "The BigQuery project or dataset location where the output is to be written to. 
If project is provided, a new dataset is created with name `prediction__` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ \"based on ISO-8601\" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both instance and prediction schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single \"errors\" column, which as values has google.rpc.Status represented as a STRUCT, and containing only `code` and `message`." }, - "minReplicaCount": { - "description": "Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.", - "format": "int32", - "type": "integer" + "gcsDestination": { + "$ref": "GoogleCloudAiplatformV1GcsDestination", + "description": "The Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.`, `predictions_0002.`, ..., `predictions_N.` are created where `` depends on chosen predictions_format, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both instance and prediction schemata defined then each such file contains predictions as per the predictions_format. 
If prediction for any instance failed (partially or completely), then an additional `errors_0001.`, `errors_0002.`,..., `errors_N.` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has google.rpc.Status containing only `code` and `message` fields." + }, + "predictionsFormat": { + "description": "Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats.", + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1AutoscalingMetricSpec": { - "description": "The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count.", - "id": "GoogleCloudAiplatformV1AutoscalingMetricSpec", + "GoogleCloudAiplatformV1BatchPredictionJobOutputInfo": { + "description": "Further describes this job's output. Supplements output_config.", + "id": "GoogleCloudAiplatformV1BatchPredictionJobOutputInfo", "properties": { - "metricName": { - "description": "Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`", + "bigqueryOutputDataset": { + "description": "Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written.", + "readOnly": true, "type": "string" }, - "target": { - "description": "The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. 
The default value is 60 (representing 60%) if not provided.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1AvroSource": { - "description": "The storage details for Avro input content.", - "id": "GoogleCloudAiplatformV1AvroSource", - "properties": { - "gcsSource": { - "$ref": "GoogleCloudAiplatformV1GcsSource", - "description": "Required. Google Cloud Storage location." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest": { - "description": "Request message for PipelineService.BatchCancelPipelineJobs.", - "id": "GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest", - "properties": { - "names": { - "description": "Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", - "items": { - "type": "string" - }, - "type": "array" + "bigqueryOutputTable": { + "description": "Output only. The name of the BigQuery table created, in `predictions_` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example.", + "readOnly": true, + "type": "string" + }, + "gcsOutputDirectory": { + "description": "Output only. 
The full path of the Cloud Storage directory created, into which the prediction output is written.", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata": { - "description": "Details of operations that perform batch create Features.", - "id": "GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata", + "GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata": { + "description": "Details of operations that batch reads Feature values.", + "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata", "properties": { "genericMetadata": { "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for Feature." + "description": "Operation metadata for Featurestore batch read Features values." } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchCreateFeaturesRequest": { - "description": "Request message for FeaturestoreService.BatchCreateFeatures.", - "id": "GoogleCloudAiplatformV1BatchCreateFeaturesRequest", + "GoogleCloudAiplatformV1BatchReadFeatureValuesRequest": { + "description": "Request message for FeaturestoreService.BatchReadFeatureValues.", + "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequest", "properties": { - "requests": { - "description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.", + "bigqueryReadInstances": { + "$ref": "GoogleCloudAiplatformV1BigQuerySource", + "description": "Similar to csv_read_instances, but from BigQuery source." 
+ }, + "csvReadInstances": { + "$ref": "GoogleCloudAiplatformV1CsvSource", + "description": "Each read instance consists of exactly one read timestamp and one or more entity IDs identifying entities of the corresponding EntityTypes whose Features are requested. Each output instance contains Feature values of requested entities concatenated together as of the read time. An example read instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z`. An example output instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each read instance must be millisecond-aligned. `csv_read_instances` are read instances stored in a plain-text CSV file. The header should be: [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in any order. Values in the timestamp column must use the RFC 3339 format, e.g. `2012-07-30T10:43:17.123Z`." + }, + "destination": { + "$ref": "GoogleCloudAiplatformV1FeatureValueDestination", + "description": "Required. Specifies output location and format." + }, + "entityTypeSpecs": { + "description": "Required. Specifies EntityType grouping Features to read values of and settings.", "items": { - "$ref": "GoogleCloudAiplatformV1CreateFeatureRequest" + "$ref": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec" }, "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchCreateFeaturesResponse": { - "description": "Response message for FeaturestoreService.BatchCreateFeatures.", - "id": "GoogleCloudAiplatformV1BatchCreateFeaturesResponse", - "properties": { - "features": { - "description": "The Features created.", + }, + "passThroughFields": { + "description": "When not empty, the specified fields in the *_read_instances source will be joined as-is in the output, in addition to those fields from the Featurestore Entity. For BigQuery source, the type of the pass-through values will be automatically inferred. 
For CSV source, the pass-through values will be passed as opaque bytes.", "items": { - "$ref": "GoogleCloudAiplatformV1Feature" + "$ref": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField" }, "type": "array" + }, + "startTime": { + "description": "Optional. Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision.", + "format": "google-datetime", + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest": { - "description": "Request message for TensorboardService.BatchCreateTensorboardRuns.", - "id": "GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest", + "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec": { + "description": "Selects Features of an EntityType to read values of and specifies read settings.", + "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec", "properties": { - "requests": { - "description": "Required. The request message specifying the TensorboardRuns to create. A maximum of 1000 TensorboardRuns can be created in a batch.", + "entityTypeId": { + "description": "Required. ID of the EntityType to select Features. The EntityType id is the entity_type_id specified during EntityType creation.", + "type": "string" + }, + "featureSelector": { + "$ref": "GoogleCloudAiplatformV1FeatureSelector", + "description": "Required. Selectors choosing which Feature values to read from the EntityType." 
+ }, + "settings": { + "description": "Per-Feature settings for the batch read.", "items": { - "$ref": "GoogleCloudAiplatformV1CreateTensorboardRunRequest" + "$ref": "GoogleCloudAiplatformV1DestinationFeatureSetting" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse": { - "description": "Response message for TensorboardService.BatchCreateTensorboardRuns.", - "id": "GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse", + "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField": { + "description": "Describe pass-through fields in read_instance source.", + "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField", "properties": { - "tensorboardRuns": { - "description": "The created TensorboardRuns.", - "items": { - "$ref": "GoogleCloudAiplatformV1TensorboardRun" - }, - "type": "array" + "fieldName": { + "description": "Required. The name of the field in the CSV header or the name of the column in BigQuery table. The naming restriction is the same as Feature.name.", + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest": { - "description": "Request message for TensorboardService.BatchCreateTensorboardTimeSeries.", - "id": "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest", - "properties": { - "requests": { - "description": "Required. The request message specifying the TensorboardTimeSeries to create. 
A maximum of 1000 TensorboardTimeSeries can be created in a batch.", - "items": { - "$ref": "GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest" - }, - "type": "array" - } - }, + "GoogleCloudAiplatformV1BatchReadFeatureValuesResponse": { + "description": "Response message for FeaturestoreService.BatchReadFeatureValues.", + "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesResponse", + "properties": {}, "type": "object" }, - "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse": { - "description": "Response message for TensorboardService.BatchCreateTensorboardTimeSeries.", - "id": "GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse", + "GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse": { + "description": "Response message for TensorboardService.BatchReadTensorboardTimeSeriesData.", + "id": "GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse", "properties": { - "tensorboardTimeSeries": { - "description": "The created TensorboardTimeSeries.", + "timeSeriesData": { + "description": "The returned time series data.", "items": { - "$ref": "GoogleCloudAiplatformV1TensorboardTimeSeries" + "$ref": "GoogleCloudAiplatformV1TimeSeriesData" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchDedicatedResources": { - "description": "A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration.", - "id": "GoogleCloudAiplatformV1BatchDedicatedResources", + "GoogleCloudAiplatformV1BigQueryDestination": { + "description": "The BigQuery location for the output content.", + "id": "GoogleCloudAiplatformV1BigQueryDestination", "properties": { - "machineSpec": { - "$ref": "GoogleCloudAiplatformV1MachineSpec", - "description": "Required. Immutable. The specification of a single machine." - }, - "maxReplicaCount": { - "description": "Immutable. The maximum number of machine replicas the batch operation may be scaled to. 
The default value is 10.", - "format": "int32", - "type": "integer" - }, - "startingReplicaCount": { - "description": "Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count", - "format": "int32", - "type": "integer" + "outputUri": { + "description": "Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`.", + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest": { - "description": "Request message for PipelineService.BatchDeletePipelineJobs.", - "id": "GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest", + "GoogleCloudAiplatformV1BigQuerySource": { + "description": "The BigQuery location for the input content.", + "id": "GoogleCloudAiplatformV1BigQuerySource", "properties": { - "names": { - "description": "Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", - "items": { - "type": "string" - }, - "type": "array" + "inputUri": { + "description": "Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`.", + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest": { - "description": "Request message for ModelService.BatchImportEvaluatedAnnotations", - "id": "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest", + "GoogleCloudAiplatformV1Blob": { + "description": "Content blob. 
It's preferred to send as text directly rather than raw bytes.", + "id": "GoogleCloudAiplatformV1Blob", "properties": { - "evaluatedAnnotations": { - "description": "Required. Evaluated annotations resource to be imported.", - "items": { - "$ref": "GoogleCloudAiplatformV1EvaluatedAnnotation" - }, - "type": "array" + "data": { + "description": "Required. Raw bytes.", + "format": "byte", + "type": "string" + }, + "mimeType": { + "description": "Required. The IANA standard MIME type of the source data.", + "type": "string" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse": { - "description": "Response message for ModelService.BatchImportEvaluatedAnnotations", - "id": "GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse", + "GoogleCloudAiplatformV1BlurBaselineConfig": { + "description": "Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383", + "id": "GoogleCloudAiplatformV1BlurBaselineConfig", "properties": { - "importedEvaluatedAnnotationsCount": { - "description": "Output only. Number of EvaluatedAnnotations imported.", - "format": "int32", - "readOnly": true, - "type": "integer" + "maxBlurSigma": { + "description": "The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. 
black for images) baseline.", + "format": "float", + "type": "number" } }, "type": "object" }, - "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest": { - "description": "Request message for ModelService.BatchImportModelEvaluationSlices", - "id": "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest", + "GoogleCloudAiplatformV1BoolArray": { + "description": "A list of boolean values.", + "id": "GoogleCloudAiplatformV1BoolArray", "properties": { - "modelEvaluationSlices": { - "description": "Required. Model evaluation slice resource to be imported.", + "values": { + "description": "A list of bool values.", "items": { - "$ref": "GoogleCloudAiplatformV1ModelEvaluationSlice" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse": { - "description": "Response message for ModelService.BatchImportModelEvaluationSlices", - "id": "GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse", - "properties": { - "importedModelEvaluationSlices": { - "description": "Output only. List of imported ModelEvaluationSlice.name.", - "items": { - "type": "string" - }, - "readOnly": true, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata": { - "description": "Runtime operation information for MigrationService.BatchMigrateResources.", - "id": "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The common part of the operation metadata." 
- }, - "partialResults": { - "description": "Partial results that reflect the latest migration operation progress.", - "items": { - "$ref": "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult": { - "description": "Represents a partial result in batch migration operation for one MigrateResourceRequest.", - "id": "GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult", - "properties": { - "dataset": { - "description": "Migrated dataset resource name.", - "type": "string" - }, - "error": { - "$ref": "GoogleRpcStatus", - "description": "The error result of the migration request in case of failure." - }, - "model": { - "description": "Migrated model resource name.", - "type": "string" - }, - "request": { - "$ref": "GoogleCloudAiplatformV1MigrateResourceRequest", - "description": "It's the same as the value in MigrateResourceRequest.migrate_resource_requests." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchMigrateResourcesRequest": { - "description": "Request message for MigrationService.BatchMigrateResources.", - "id": "GoogleCloudAiplatformV1BatchMigrateResourcesRequest", - "properties": { - "migrateResourceRequests": { - "description": "Required. The request messages specifying the resources to migrate. They must be in the same location as the destination. 
Up to 50 resources can be migrated in one batch.", - "items": { - "$ref": "GoogleCloudAiplatformV1MigrateResourceRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchMigrateResourcesResponse": { - "description": "Response message for MigrationService.BatchMigrateResources.", - "id": "GoogleCloudAiplatformV1BatchMigrateResourcesResponse", - "properties": { - "migrateResourceResponses": { - "description": "Successfully migrated resources.", - "items": { - "$ref": "GoogleCloudAiplatformV1MigrateResourceResponse" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchPredictionJob": { - "description": "A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances.", - "id": "GoogleCloudAiplatformV1BatchPredictionJob", - "properties": { - "completionStats": { - "$ref": "GoogleCloudAiplatformV1CompletionStats", - "description": "Output only. Statistics on completed and failed prediction instances.", - "readOnly": true - }, - "createTime": { - "description": "Output only. Time when the BatchPredictionJob was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "dedicatedResources": { - "$ref": "GoogleCloudAiplatformV1BatchDedicatedResources", - "description": "The config of resources used by the Model during the batch prediction. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided." - }, - "disableContainerLogging": { - "description": "For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. 
Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true.", - "type": "boolean" - }, - "displayName": { - "description": "Required. The user-defined name of this BatchPredictionJob.", - "type": "string" - }, - "encryptionSpec": { - "$ref": "GoogleCloudAiplatformV1EncryptionSpec", - "description": "Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key." - }, - "endTime": { - "description": "Output only. Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "error": { - "$ref": "GoogleRpcStatus", - "description": "Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED.", - "readOnly": true - }, - "explanationSpec": { - "$ref": "GoogleCloudAiplatformV1ExplanationSpec", - "description": "Explanation configuration for this BatchPredictionJob. Can be specified only if generate_explanation is set to `true`. This value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of the explanation_spec object is not populated, the corresponding field of the Model.explanation_spec object is inherited." - }, - "generateExplanation": { - "description": "Generate explanation with the batch prediction results. When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. 
The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated.", - "type": "boolean" - }, - "inputConfig": { - "$ref": "GoogleCloudAiplatformV1BatchPredictionJobInputConfig", - "description": "Required. Input configuration of the instances on which predictions are performed. The schema of any single instance may be specified via the Model's PredictSchemata's instance_schema_uri." - }, - "instanceConfig": { - "$ref": "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig", - "description": "Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model." - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.", - "type": "object" - }, - "manualBatchTuningParameters": { - "$ref": "GoogleCloudAiplatformV1ManualBatchTuningParameters", - "description": "Immutable. Parameters configuring the batch behavior. Currently only applicable when dedicated_resources are used (in other cases Vertex AI does the tuning itself)." - }, - "model": { - "description": "The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. 
Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher}/models/{model}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`", - "type": "string" - }, - "modelParameters": { - "description": "The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri.", - "type": "any" - }, - "modelVersionId": { - "description": "Output only. The version ID of the Model that produces the predictions via this job.", - "readOnly": true, - "type": "string" - }, - "name": { - "description": "Output only. Resource name of the BatchPredictionJob.", - "readOnly": true, - "type": "string" - }, - "outputConfig": { - "$ref": "GoogleCloudAiplatformV1BatchPredictionJobOutputConfig", - "description": "Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of Model's PredictSchemata's instance_schema_uri and prediction_schema_uri." - }, - "outputInfo": { - "$ref": "GoogleCloudAiplatformV1BatchPredictionJobOutputInfo", - "description": "Output only. Information further describing the output of this job.", - "readOnly": true - }, - "partialFailures": { - "description": "Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details.", - "items": { - "$ref": "GoogleRpcStatus" - }, - "readOnly": true, - "type": "array" - }, - "resourcesConsumed": { - "$ref": "GoogleCloudAiplatformV1ResourcesConsumed", - "description": "Output only. Information about resources that had been consumed by this job. 
Provided in real time at best effort basis, as well as a final value once the job completes. Note: This field currently may be not populated for batch predictions that use AutoML Models.", - "readOnly": true - }, - "serviceAccount": { - "description": "The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account.", - "type": "string" - }, - "startTime": { - "description": "Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "state": { - "description": "Output only. The detailed state of the job.", - "enum": [ - "JOB_STATE_UNSPECIFIED", - "JOB_STATE_QUEUED", - "JOB_STATE_PENDING", - "JOB_STATE_RUNNING", - "JOB_STATE_SUCCEEDED", - "JOB_STATE_FAILED", - "JOB_STATE_CANCELLING", - "JOB_STATE_CANCELLED", - "JOB_STATE_PAUSED", - "JOB_STATE_EXPIRED", - "JOB_STATE_UPDATING", - "JOB_STATE_PARTIALLY_SUCCEEDED" - ], - "enumDescriptions": [ - "The job state is unspecified.", - "The job has been just created or resumed and processing has not yet begun.", - "The service is preparing to run the job.", - "The job is in progress.", - "The job completed successfully.", - "The job failed.", - "The job is being cancelled. From this state the job may only go to either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.", - "The job has been cancelled.", - "The job has been stopped, and can be resumed.", - "The job has expired.", - "The job is being updated. Only jobs in the `RUNNING` state can be updated. After updating, the job goes back to the `RUNNING` state.", - "The job is partially succeeded, some results may be missing due to errors." 
- ], - "readOnly": true, - "type": "string" - }, - "unmanagedContainerModel": { - "$ref": "GoogleCloudAiplatformV1UnmanagedContainerModel", - "description": "Contains model information necessary to perform batch prediction without requiring uploading to model registry. Exactly one of model and unmanaged_container_model must be set." - }, - "updateTime": { - "description": "Output only. Time when the BatchPredictionJob was most recently updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchPredictionJobInputConfig": { - "description": "Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them.", - "id": "GoogleCloudAiplatformV1BatchPredictionJobInputConfig", - "properties": { - "bigquerySource": { - "$ref": "GoogleCloudAiplatformV1BigQuerySource", - "description": "The BigQuery location of the input table. The schema of the table should be in the format described by the given context OpenAPI Schema, if one is provided. The table may contain additional columns that are not described by the schema, and they will be ignored." - }, - "gcsSource": { - "$ref": "GoogleCloudAiplatformV1GcsSource", - "description": "The Cloud Storage location for the input instances." - }, - "instancesFormat": { - "description": "Required. 
The format in which instances are given, must be one of the Model's supported_input_storage_formats.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig": { - "description": "Configuration defining how to transform batch prediction input instances to the instances that the Model accepts.", - "id": "GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig", - "properties": { - "excludedFields": { - "description": "Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.", - "items": { - "type": "string" - }, - "type": "array" - }, - "includedFields": { - "description": "Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord.", - "items": { - "type": "string" - }, - "type": "array" - }, - "instanceType": { - "description": "The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. 
included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{\"b64\": }`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{\"b64\": }`, where `` is the Base64-encoded string of the content of the file.", - "type": "string" - }, - "keyField": { - "description": "The name of the field that is considered as a key. The values identified by the key field is not included in the transformed instances that is sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchPredictionJobOutputConfig": { - "description": "Configures the output of BatchPredictionJob. 
See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them.", - "id": "GoogleCloudAiplatformV1BatchPredictionJobOutputConfig", - "properties": { - "bigqueryDestination": { - "$ref": "GoogleCloudAiplatformV1BigQueryDestination", - "description": "The BigQuery project or dataset location where the output is to be written to. If project is provided, a new dataset is created with name `prediction__` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ \"based on ISO-8601\" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both instance and prediction schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single \"errors\" column, which as values has google.rpc.Status represented as a STRUCT, and containing only `code` and `message`." - }, - "gcsDestination": { - "$ref": "GoogleCloudAiplatformV1GcsDestination", - "description": "The Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.`, `predictions_0002.`, ..., `predictions_N.` are created where `` depends on chosen predictions_format, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both instance and prediction schemata defined then each such file contains predictions as per the predictions_format. 
If prediction for any instance failed (partially or completely), then an additional `errors_0001.`, `errors_0002.`,..., `errors_N.` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has google.rpc.Status containing only `code` and `message` fields." - }, - "predictionsFormat": { - "description": "Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchPredictionJobOutputInfo": { - "description": "Further describes this job's output. Supplements output_config.", - "id": "GoogleCloudAiplatformV1BatchPredictionJobOutputInfo", - "properties": { - "bigqueryOutputDataset": { - "description": "Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written.", - "readOnly": true, - "type": "string" - }, - "bigqueryOutputTable": { - "description": "Output only. The name of the BigQuery table created, in `predictions_` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example.", - "readOnly": true, - "type": "string" - }, - "gcsOutputDirectory": { - "description": "Output only. The full path of the Cloud Storage directory created, into which the prediction output is written.", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata": { - "description": "Details of operations that batch reads Feature values.", - "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for Featurestore batch read Features values." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchReadFeatureValuesRequest": { - "description": "Request message for FeaturestoreService.BatchReadFeatureValues.", - "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequest", - "properties": { - "bigqueryReadInstances": { - "$ref": "GoogleCloudAiplatformV1BigQuerySource", - "description": "Similar to csv_read_instances, but from BigQuery source." - }, - "csvReadInstances": { - "$ref": "GoogleCloudAiplatformV1CsvSource", - "description": "Each read instance consists of exactly one read timestamp and one or more entity IDs identifying entities of the corresponding EntityTypes whose Features are requested. Each output instance contains Feature values of requested entities concatenated together as of the read time. An example read instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z`. An example output instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each read instance must be millisecond-aligned. `csv_read_instances` are read instances stored in a plain-text CSV file. The header should be: [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in any order. Values in the timestamp column must use the RFC 3339 format, e.g. `2012-07-30T10:43:17.123Z`." - }, - "destination": { - "$ref": "GoogleCloudAiplatformV1FeatureValueDestination", - "description": "Required. Specifies output location and format." - }, - "entityTypeSpecs": { - "description": "Required. Specifies EntityType grouping Features to read values of and settings.", - "items": { - "$ref": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec" - }, - "type": "array" - }, - "passThroughFields": { - "description": "When not empty, the specified fields in the *_read_instances source will be joined as-is in the output, in addition to those fields from the Featurestore Entity. 
For BigQuery source, the type of the pass-through values will be automatically inferred. For CSV source, the pass-through values will be passed as opaque bytes.", - "items": { - "$ref": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField" - }, - "type": "array" - }, - "startTime": { - "description": "Optional. Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision.", - "format": "google-datetime", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec": { - "description": "Selects Features of an EntityType to read values of and specifies read settings.", - "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec", - "properties": { - "entityTypeId": { - "description": "Required. ID of the EntityType to select Features. The EntityType id is the entity_type_id specified during EntityType creation.", - "type": "string" - }, - "featureSelector": { - "$ref": "GoogleCloudAiplatformV1FeatureSelector", - "description": "Required. Selectors choosing which Feature values to read from the EntityType." - }, - "settings": { - "description": "Per-Feature settings for the batch read.", - "items": { - "$ref": "GoogleCloudAiplatformV1DestinationFeatureSetting" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField": { - "description": "Describe pass-through fields in read_instance source.", - "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField", - "properties": { - "fieldName": { - "description": "Required. The name of the field in the CSV header or the name of the column in BigQuery table. 
The naming restriction is the same as Feature.name.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchReadFeatureValuesResponse": { - "description": "Response message for FeaturestoreService.BatchReadFeatureValues.", - "id": "GoogleCloudAiplatformV1BatchReadFeatureValuesResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse": { - "description": "Response message for TensorboardService.BatchReadTensorboardTimeSeriesData.", - "id": "GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse", - "properties": { - "timeSeriesData": { - "description": "The returned time series data.", - "items": { - "$ref": "GoogleCloudAiplatformV1TimeSeriesData" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BigQueryDestination": { - "description": "The BigQuery location for the output content.", - "id": "GoogleCloudAiplatformV1BigQueryDestination", - "properties": { - "outputUri": { - "description": "Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BigQuerySource": { - "description": "The BigQuery location for the input content.", - "id": "GoogleCloudAiplatformV1BigQuerySource", - "properties": { - "inputUri": { - "description": "Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1Blob": { - "description": "Content blob. 
It's preferred to send as text directly rather than raw bytes.", - "id": "GoogleCloudAiplatformV1Blob", - "properties": { - "data": { - "description": "Required. Raw bytes.", - "format": "byte", - "type": "string" - }, - "mimeType": { - "description": "Required. The IANA standard MIME type of the source data.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BlurBaselineConfig": { - "description": "Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383", - "id": "GoogleCloudAiplatformV1BlurBaselineConfig", - "properties": { - "maxBlurSigma": { - "description": "The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1BoolArray": { - "description": "A list of boolean values.", - "id": "GoogleCloudAiplatformV1BoolArray", - "properties": { - "values": { - "description": "A list of bool values.", - "items": { - "type": "boolean" + "type": "boolean" }, "type": "array" } @@ -22715,6 +21883,22 @@ "description": "Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag.", "format": "int32", "type": "integer" + }, + "rrf": { + "$ref": "GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF", + "description": "Optional. Represents RRF algorithm that combines search results." 
+ } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF": { + "description": "Parameters for RRF algorithm that combines search results.", + "id": "GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF", + "properties": { + "alpha": { + "description": "Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense.", + "format": "float", + "type": "number" } }, "type": "object" @@ -22763,6 +21947,11 @@ "description": "The distance between the neighbor and the dense embedding query.", "format": "double", "type": "number" + }, + "sparseDistance": { + "description": "The distance between the neighbor and the query sparse_embedding.", + "format": "double", + "type": "number" } }, "type": "object" @@ -23573,6 +22762,10 @@ "$ref": "GoogleCloudAiplatformV1IndexDatapointRestriction" }, "type": "array" + }, + "sparseEmbedding": { + "$ref": "GoogleCloudAiplatformV1IndexDatapointSparseEmbedding", + "description": "Optional. Feature embedding vector for sparse index." } }, "type": "object" @@ -23661,6 +22854,29 @@ }, "type": "object" }, + "GoogleCloudAiplatformV1IndexDatapointSparseEmbedding": { + "description": "Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions.", + "id": "GoogleCloudAiplatformV1IndexDatapointSparseEmbedding", + "properties": { + "dimensions": { + "description": "Required. The list of indexes for the embedding values of the sparse vector.", + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "values": { + "description": "Required. The list of embedding values of the sparse vector.", + "items": { + "format": "float", + "type": "number" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudAiplatformV1IndexEndpoint": { "description": "Indexes are deployed into it. 
An IndexEndpoint can have multiple DeployedIndexes.", "id": "GoogleCloudAiplatformV1IndexEndpoint", @@ -23773,6 +22989,12 @@ "readOnly": true, "type": "integer" }, + "sparseVectorsCount": { + "description": "Output only. The number of sparse vectors in the Index.", + "format": "int64", + "readOnly": true, + "type": "string" + }, "vectorsCount": { "description": "Output only. The number of dense vectors in the Index.", "format": "int64", @@ -26926,7 +26148,9 @@ "MULTIPLE_VALUES", "INVALID_NUMERIC_VALUE", "INVALID_ENCODING", - "INVALID_TOKEN_VALUE" + "INVALID_SPARSE_DIMENSIONS", + "INVALID_TOKEN_VALUE", + "INVALID_SPARSE_EMBEDDING" ], "enumDescriptions": [ "Default, shall not be used.", @@ -26943,7 +26167,9 @@ "Numeric restrict has multiple values specified.", "Numeric restrict has invalid numeric value specified.", "File is not in UTF_8 format.", - "Token restrict value is invalid." + "Error parsing sparse dimensions field.", + "Token restrict value is invalid.", + "Invalid sparse embedding." ], "type": "string" }, @@ -35525,6477 +34751,1018 @@ "readOnly": true, "type": "string" }, - "value": { - "description": "Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'.", - "readOnly": true, - "type": "any" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1TunedModel": { - "description": "The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob.", - "id": "GoogleCloudAiplatformV1TunedModel", - "properties": { - "endpoint": { - "description": "Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.", - "readOnly": true, - "type": "string" - }, - "model": { - "description": "Output only. The resource name of the TunedModel. 
Format: `projects/{project}/locations/{location}/models/{model}`.", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1TuningDataStats": { - "description": "The tuning data statistic values for TuningJob.", - "id": "GoogleCloudAiplatformV1TuningDataStats", - "properties": { - "supervisedTuningDataStats": { - "$ref": "GoogleCloudAiplatformV1SupervisedTuningDataStats", - "description": "The SFT Tuning data stats." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1TuningJob": { - "description": "Represents a TuningJob that runs with Google owned models.", - "id": "GoogleCloudAiplatformV1TuningJob", - "properties": { - "baseModel": { - "description": "The base model that is being tuned, e.g., \"gemini-1.0-pro-002\".", - "type": "string" - }, - "createTime": { - "description": "Output only. Time when the TuningJob was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Optional. The description of the TuningJob.", - "type": "string" - }, - "encryptionSpec": { - "$ref": "GoogleCloudAiplatformV1EncryptionSpec", - "description": "Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key." - }, - "endTime": { - "description": "Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "error": { - "$ref": "GoogleRpcStatus", - "description": "Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.", - "readOnly": true - }, - "experiment": { - "description": "Output only. 
The Experiment associated with this TuningJob.", - "readOnly": true, - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.", - "type": "object" - }, - "name": { - "description": "Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}`", - "readOnly": true, - "type": "string" - }, - "startTime": { - "description": "Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "state": { - "description": "Output only. The detailed state of the job.", - "enum": [ - "JOB_STATE_UNSPECIFIED", - "JOB_STATE_QUEUED", - "JOB_STATE_PENDING", - "JOB_STATE_RUNNING", - "JOB_STATE_SUCCEEDED", - "JOB_STATE_FAILED", - "JOB_STATE_CANCELLING", - "JOB_STATE_CANCELLED", - "JOB_STATE_PAUSED", - "JOB_STATE_EXPIRED", - "JOB_STATE_UPDATING", - "JOB_STATE_PARTIALLY_SUCCEEDED" - ], - "enumDescriptions": [ - "The job state is unspecified.", - "The job has been just created or resumed and processing has not yet begun.", - "The service is preparing to run the job.", - "The job is in progress.", - "The job completed successfully.", - "The job failed.", - "The job is being cancelled. From this state the job may only go to either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.", - "The job has been cancelled.", - "The job has been stopped, and can be resumed.", - "The job has expired.", - "The job is being updated. 
Only jobs in the `RUNNING` state can be updated. After updating, the job goes back to the `RUNNING` state.", - "The job is partially succeeded, some results may be missing due to errors." - ], - "readOnly": true, - "type": "string" - }, - "supervisedTuningSpec": { - "$ref": "GoogleCloudAiplatformV1SupervisedTuningSpec", - "description": "Tuning Spec for Supervised Fine Tuning." - }, - "tunedModel": { - "$ref": "GoogleCloudAiplatformV1TunedModel", - "description": "Output only. The tuned model resources assiociated with this TuningJob.", - "readOnly": true - }, - "tunedModelDisplayName": { - "description": "Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters.", - "type": "string" - }, - "tuningDataStats": { - "$ref": "GoogleCloudAiplatformV1TuningDataStats", - "description": "Output only. The tuning data statistics associated with this TuningJob.", - "readOnly": true - }, - "updateTime": { - "description": "Output only. Time when the TuningJob was most recently updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UndeployIndexOperationMetadata": { - "description": "Runtime operation information for IndexEndpointService.UndeployIndex.", - "id": "GoogleCloudAiplatformV1UndeployIndexOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UndeployIndexRequest": { - "description": "Request message for IndexEndpointService.UndeployIndex.", - "id": "GoogleCloudAiplatformV1UndeployIndexRequest", - "properties": { - "deployedIndexId": { - "description": "Required. 
The ID of the DeployedIndex to be undeployed from the IndexEndpoint.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UndeployIndexResponse": { - "description": "Response message for IndexEndpointService.UndeployIndex.", - "id": "GoogleCloudAiplatformV1UndeployIndexResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1UndeployModelOperationMetadata": { - "description": "Runtime operation information for EndpointService.UndeployModel.", - "id": "GoogleCloudAiplatformV1UndeployModelOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UndeployModelRequest": { - "description": "Request message for EndpointService.UndeployModel.", - "id": "GoogleCloudAiplatformV1UndeployModelRequest", - "properties": { - "deployedModelId": { - "description": "Required. The ID of the DeployedModel to be undeployed from the Endpoint.", - "type": "string" - }, - "trafficSplit": { - "additionalProperties": { - "format": "int32", - "type": "integer" - }, - "description": "If this field is provided, then the Endpoint's traffic_split will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. 
A DeployedModel will be successfully undeployed only if it doesn't have any traffic assigned to it when this method executes, or if this field unassigns any traffic to it.", - "type": "object" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UndeployModelResponse": { - "description": "Response message for EndpointService.UndeployModel.", - "id": "GoogleCloudAiplatformV1UndeployModelResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1UnmanagedContainerModel": { - "description": "Contains model information necessary to perform batch prediction without requiring a full model import.", - "id": "GoogleCloudAiplatformV1UnmanagedContainerModel", - "properties": { - "artifactUri": { - "description": "The path to the directory containing the Model artifact and any of its supporting files.", - "type": "string" - }, - "containerSpec": { - "$ref": "GoogleCloudAiplatformV1ModelContainerSpec", - "description": "Input only. The specification of the container that is to be used when deploying this Model." - }, - "predictSchemata": { - "$ref": "GoogleCloudAiplatformV1PredictSchemata", - "description": "Contains the schemata used in Model's predictions and explanations" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata": { - "description": "Runtime operation information for UpdateDeploymentResourcePool method.", - "id": "GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata": { - "description": "Runtime operation information for ModelService.UpdateExplanationDataset.", - "id": "GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The common part of the operation metadata." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateExplanationDatasetRequest": { - "description": "Request message for ModelService.UpdateExplanationDataset.", - "id": "GoogleCloudAiplatformV1UpdateExplanationDatasetRequest", - "properties": { - "examples": { - "$ref": "GoogleCloudAiplatformV1Examples", - "description": "The example config containing the location of the dataset." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateExplanationDatasetResponse": { - "description": "Response message of ModelService.UpdateExplanationDataset operation.", - "id": "GoogleCloudAiplatformV1UpdateExplanationDatasetResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata": { - "description": "Details of operations that perform update FeatureGroup.", - "id": "GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for FeatureGroup." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata": { - "description": "Details of operations that perform update FeatureOnlineStore.", - "id": "GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for FeatureOnlineStore." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateFeatureOperationMetadata": { - "description": "Details of operations that perform update Feature.", - "id": "GoogleCloudAiplatformV1UpdateFeatureOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for Feature Update." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata": { - "description": "Details of operations that perform update FeatureView.", - "id": "GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for FeatureView Update." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata": { - "description": "Details of operations that perform update Featurestore.", - "id": "GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for Featurestore." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateIndexOperationMetadata": { - "description": "Runtime operation information for IndexService.UpdateIndex.", - "id": "GoogleCloudAiplatformV1UpdateIndexOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - }, - "nearestNeighborSearchOperationMetadata": { - "$ref": "GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata", - "description": "The operation metadata with regard to Matching Engine Index operation." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata": { - "description": "Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob.", - "id": "GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata": { - "description": "Details of operations that perform update PersistentResource.", - "id": "GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for PersistentResource." - }, - "progressMessage": { - "description": "Progress Message for Update LRO", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata": { - "description": "Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool.", - "id": "GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - }, - "specialistPool": { - "description": "Output only. The name of the SpecialistPool to which the specialists are being added. 
Format: `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata": { - "description": "Details of operations that perform update Tensorboard.", - "id": "GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "Operation metadata for Tensorboard." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata": { - "description": "Metadata information for NotebookService.UpgradeNotebookRuntime.", - "id": "GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The operation generic information." - }, - "progressMessage": { - "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest": { - "description": "Request message for NotebookService.UpgradeNotebookRuntime.", - "id": "GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1UploadModelOperationMetadata": { - "description": "Details of ModelService.UploadModel operation.", - "id": "GoogleCloudAiplatformV1UploadModelOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", - "description": "The common part of the operation metadata." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UploadModelRequest": { - "description": "Request message for ModelService.UploadModel.", - "id": "GoogleCloudAiplatformV1UploadModelRequest", - "properties": { - "model": { - "$ref": "GoogleCloudAiplatformV1Model", - "description": "Required. The Model to create." - }, - "modelId": { - "description": "Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen.", - "type": "string" - }, - "parentModel": { - "description": "Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version.", - "type": "string" - }, - "serviceAccount": { - "description": "Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.).", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UploadModelResponse": { - "description": "Response message of ModelService.UploadModel operation.", - "id": "GoogleCloudAiplatformV1UploadModelResponse", - "properties": { - "model": { - "description": "The name of the uploaded Model resource. Format: `projects/{project}/locations/{location}/models/{model}`", - "type": "string" - }, - "modelVersionId": { - "description": "Output only. 
The version ID of the model that is uploaded.", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpsertDatapointsRequest": { - "description": "Request message for IndexService.UpsertDatapoints", - "id": "GoogleCloudAiplatformV1UpsertDatapointsRequest", - "properties": { - "datapoints": { - "description": "A list of datapoints to be created/updated.", - "items": { - "$ref": "GoogleCloudAiplatformV1IndexDatapoint" - }, - "type": "array" - }, - "updateMask": { - "description": "Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts.", - "format": "google-fieldmask", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1UpsertDatapointsResponse": { - "description": "Response message for IndexService.UpsertDatapoints", - "id": "GoogleCloudAiplatformV1UpsertDatapointsResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1UserActionReference": { - "description": "References an API call. It contains more information about long running operation and Jobs that are triggered by the API call.", - "id": "GoogleCloudAiplatformV1UserActionReference", - "properties": { - "dataLabelingJob": { - "description": "For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`", - "type": "string" - }, - "method": { - "description": "The method name of the API RPC call. For example, \"/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset\"", - "type": "string" - }, - "operation": { - "description": "For API calls that return a long running operation. Resource name of the long running operation. 
Format: `projects/{project}/locations/{location}/operations/{operation}`", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1Value": { - "description": "Value is the value of the field.", - "id": "GoogleCloudAiplatformV1Value", - "properties": { - "doubleValue": { - "description": "A double value.", - "format": "double", - "type": "number" - }, - "intValue": { - "description": "An integer value.", - "format": "int64", - "type": "string" - }, - "stringValue": { - "description": "A string value.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1VertexAISearch": { - "description": "Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation", - "id": "GoogleCloudAiplatformV1VertexAISearch", - "properties": { - "datastore": { - "description": "Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1VideoMetadata": { - "description": "Metadata describes the input video content.", - "id": "GoogleCloudAiplatformV1VideoMetadata", - "properties": { - "endOffset": { - "description": "Optional. The end offset of the video.", - "format": "google-duration", - "type": "string" - }, - "startOffset": { - "description": "Optional. The start offset of the video.", - "format": "google-duration", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1WorkerPoolSpec": { - "description": "Represents the spec of a worker pool in a job.", - "id": "GoogleCloudAiplatformV1WorkerPoolSpec", - "properties": { - "containerSpec": { - "$ref": "GoogleCloudAiplatformV1ContainerSpec", - "description": "The custom container task." - }, - "diskSpec": { - "$ref": "GoogleCloudAiplatformV1DiskSpec", - "description": "Disk spec." 
- }, - "machineSpec": { - "$ref": "GoogleCloudAiplatformV1MachineSpec", - "description": "Optional. Immutable. The specification of a single machine." - }, - "nfsMounts": { - "description": "Optional. List of NFS mount spec.", - "items": { - "$ref": "GoogleCloudAiplatformV1NfsMount" - }, - "type": "array" - }, - "pythonPackageSpec": { - "$ref": "GoogleCloudAiplatformV1PythonPackageSpec", - "description": "The Python packaged task." - }, - "replicaCount": { - "description": "Optional. The number of worker replicas to use for this worker pool.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteFeatureValuesPayload": { - "description": "Contains Feature values to be written for a specific entity.", - "id": "GoogleCloudAiplatformV1WriteFeatureValuesPayload", - "properties": { - "entityId": { - "description": "Required. The ID of the entity.", - "type": "string" - }, - "featureValues": { - "additionalProperties": { - "$ref": "GoogleCloudAiplatformV1FeatureValue" - }, - "description": "Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future.", - "type": "object" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteFeatureValuesRequest": { - "description": "Request message for FeaturestoreOnlineServingService.WriteFeatureValues.", - "id": "GoogleCloudAiplatformV1WriteFeatureValuesRequest", - "properties": { - "payloads": { - "description": "Required. The entities to be written. 
Up to 100,000 feature values can be written across all `payloads`.", - "items": { - "$ref": "GoogleCloudAiplatformV1WriteFeatureValuesPayload" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteFeatureValuesResponse": { - "description": "Response message for FeaturestoreOnlineServingService.WriteFeatureValues.", - "id": "GoogleCloudAiplatformV1WriteFeatureValuesResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest": { - "description": "Request message for TensorboardService.WriteTensorboardExperimentData.", - "id": "GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest", - "properties": { - "writeRunDataRequests": { - "description": "Required. Requests containing per-run TensorboardTimeSeries data to write.", - "items": { - "$ref": "GoogleCloudAiplatformV1WriteTensorboardRunDataRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse": { - "description": "Response message for TensorboardService.WriteTensorboardExperimentData.", - "id": "GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteTensorboardRunDataRequest": { - "description": "Request message for TensorboardService.WriteTensorboardRunData.", - "id": "GoogleCloudAiplatformV1WriteTensorboardRunDataRequest", - "properties": { - "tensorboardRun": { - "description": "Required. The resource name of the TensorboardRun to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`", - "type": "string" - }, - "timeSeriesData": { - "description": "Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. 
The upper limit of data points per write request is 5000.", - "items": { - "$ref": "GoogleCloudAiplatformV1TimeSeriesData" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1WriteTensorboardRunDataResponse": { - "description": "Response message for TensorboardService.WriteTensorboardRunData.", - "id": "GoogleCloudAiplatformV1WriteTensorboardRunDataResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1XraiAttribution": { - "description": "An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models.", - "id": "GoogleCloudAiplatformV1XraiAttribution", - "properties": { - "blurBaselineConfig": { - "$ref": "GoogleCloudAiplatformV1BlurBaselineConfig", - "description": "Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383" - }, - "smoothGradConfig": { - "$ref": "GoogleCloudAiplatformV1SmoothGradConfig", - "description": "Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf" - }, - "stepCount": { - "description": "Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. 
Valid range of its value is [1, 100], inclusively.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleCloudLocationListLocationsResponse": { - "description": "The response message for Locations.ListLocations.", - "id": "GoogleCloudLocationListLocationsResponse", - "properties": { - "locations": { - "description": "A list of locations that matches the specified filter in the request.", - "items": { - "$ref": "GoogleCloudLocationLocation" - }, - "type": "array" - }, - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudLocationLocation": { - "description": "A resource that represents a Google Cloud location.", - "id": "GoogleCloudLocationLocation", - "properties": { - "displayName": { - "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", - "type": "object" - }, - "locationId": { - "description": "The canonical id for this location. For example: `\"us-east1\"`.", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata. For example the available capacity at the given location.", - "type": "object" - }, - "name": { - "description": "Resource name for the location, which may vary between implementations. 
For example: `\"projects/example-project/locations/us-east1\"`", - "type": "string" - } - }, - "type": "object" - }, - "GoogleIamV1Binding": { - "description": "Associates `members`, or principals, with a `role`.", - "id": "GoogleIamV1Binding", - "properties": { - "condition": { - "$ref": "GoogleTypeExpr", - "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." - }, - "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. 
* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. 
For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.", - "items": { - "type": "string" - }, - "type": "array" - }, - "role": { - "description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).", - "type": "string" - } - }, - "type": "object" - }, - "GoogleIamV1Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. 
For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", - "id": "GoogleIamV1Policy", - "properties": { - "bindings": { - "description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. 
The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.", - "items": { - "$ref": "GoogleIamV1Binding" - }, - "type": "array" - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", - "format": "byte", - "type": "string" - }, - "version": { - "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. 
If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleIamV1SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "id": "GoogleIamV1SetIamPolicyRequest", - "properties": { - "policy": { - "$ref": "GoogleIamV1Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." - } - }, - "type": "object" - }, - "GoogleIamV1TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", - "id": "GoogleIamV1TestIamPermissionsResponse", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleLongrunningListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", - "id": "GoogleLongrunningListOperationsResponse", - "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - }, - "operations": { - "description": "A list of operations that matches the specified filter in the request.", - "items": { - "$ref": "GoogleLongrunningOperation" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleLongrunningOperation": { - "description": "This resource represents a long-running 
operation that is the result of a network API call.", - "id": "GoogleLongrunningOperation", - "properties": { - "done": { - "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", - "type": "boolean" - }, - "error": { - "$ref": "GoogleRpcStatus", - "description": "The error result of the operation in case of failure or cancellation." - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", - "type": "object" - }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", - "type": "string" - }, - "response": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", - "type": "object" - } - }, - "type": "object" - }, - "GoogleProtobufEmpty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", - "id": "GoogleProtobufEmpty", - "properties": {}, - "type": "object" - }, - "GoogleRpcStatus": { - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", - "id": "GoogleRpcStatus", - "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - }, - "details": { - "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleTypeColor": { - "description": "Represents a color in the RGBA color space. 
This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... 
static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ...", - "id": "GoogleTypeColor", - "properties": { - "alpha": { - "description": "The fraction of this color that should be applied to the pixel. 
That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0).", - "format": "float", - "type": "number" - }, - "blue": { - "description": "The amount of blue in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - }, - "green": { - "description": "The amount of green in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - }, - "red": { - "description": "The amount of red in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "GoogleTypeDate": { - "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp", - "id": "GoogleTypeDate", - "properties": { - "day": { - "description": "Day of a month. 
Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.", - "format": "int32", - "type": "integer" - }, - "month": { - "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.", - "format": "int32", - "type": "integer" - }, - "year": { - "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleTypeExpr": { - "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", - "id": "GoogleTypeExpr", - "properties": { - "description": { - "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. 
when hovered over it in a UI.", - "type": "string" - }, - "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax.", - "type": "string" - }, - "location": { - "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - "type": "string" - }, - "title": { - "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleTypeInterval": { - "description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", - "id": "GoogleTypeInterval", - "properties": { - "endTime": { - "description": "Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start.", - "format": "google-datetime", - "type": "string" - } - }, - "type": "object" - }, - "GoogleTypeMoney": { - "description": "Represents an amount of money with its currency type.", - "id": "GoogleTypeMoney", - "properties": { - "currencyCode": { - "description": "The three-letter currency code defined in ISO 4217.", - "type": "string" - }, - "nanos": { - "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. 
If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", - "format": "int32", - "type": "integer" - }, - "units": { - "description": "The whole units of the amount. For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "IntelligenceCloudAutomlXpsMetricEntry": { - "id": "IntelligenceCloudAutomlXpsMetricEntry", - "properties": { - "argentumMetricId": { - "description": "For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the \"cloudbilling.googleapis.com/argentum_metric_id\" label. Otherwise leave empty.", - "type": "string" - }, - "doubleValue": { - "description": "A double value.", - "format": "double", - "type": "number" - }, - "int64Value": { - "description": "A signed 64-bit integer value.", - "format": "int64", - "type": "string" - }, - "metricName": { - "description": "The metric name defined in the service configuration.", - "type": "string" - }, - "systemLabels": { - "description": "Billing system labels for this (metric, value) pair.", - "items": { - "$ref": "IntelligenceCloudAutomlXpsMetricEntryLabel" - }, - "type": "array" - } - }, - "type": "object" - }, - "IntelligenceCloudAutomlXpsMetricEntryLabel": { - "id": "IntelligenceCloudAutomlXpsMetricEntryLabel", - "properties": { - "labelName": { - "description": "The name of the label.", - "type": "string" - }, - "labelValue": { - "description": "The value of the label.", - "type": "string" - } - }, - "type": "object" - }, - "IntelligenceCloudAutomlXpsReportingMetrics": { - "id": "IntelligenceCloudAutomlXpsReportingMetrics", - "properties": { - "effectiveTrainingDuration": { - "deprecated": true, - "description": "The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. 
Don't set.", - "format": "google-duration", - "type": "string" - }, - "metricEntries": { - "description": "One entry per metric name. The values must be aggregated per metric name.", - "items": { - "$ref": "IntelligenceCloudAutomlXpsMetricEntry" - }, - "type": "array" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoDocAttribution": { - "description": "The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30", - "id": "LanguageLabsAidaTrustRecitationProtoDocAttribution", - "properties": { - "amarnaId": { - "type": "string" - }, - "arxivId": { - "type": "string" - }, - "author": { - "type": "string" - }, - "bibkey": { - "type": "string" - }, - "biorxivId": { - "description": "ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517", - "type": "string" - }, - "bookTitle": { - "type": "string" - }, - "bookVolumeId": { - "description": "The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set.", - "format": "int64", - "type": "string" - }, - "category": { - "enum": [ - "CATEGORY_UNSPECIFIED", - "CATEGORY_NEWS", - "CATEGORY_NON_NEWS_WEBDOC", - "CATEGORY_UNKNOWN_MISSING_SIGNAL" - ], - "enumDescriptions": [ - "", - "The doc has a url and the news classifier has classified this doc as news.", - "The doc has a url and the news classifier classified this doc as non-news.", - "The doc has a url but the url was missing from the news classifier URL table." 
- ], - "type": "string" - }, - "conversationId": { - "type": "string" - }, - "dataset": { - "description": "The dataset this document comes from.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - "LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - "PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - "GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - 
"CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - "GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - 
"GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - "DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - "DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - "LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - 
"GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - 
"GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - "GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", - "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - "GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - 
"GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - 
"GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article 
Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. 
Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Github dataset with license info. We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset.", - "Mobile assistant finetune datasets.", - "", - "Genesis fine-tune datasets.", - "Cloud Security fine-tune datasets.", - "", - "", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini 
V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inpsiration." - ], - "type": "string" - }, - "filepath": { - "type": "string" - }, - "geminiId": { - "type": "string" - }, - "gnewsArticleTitle": { - "type": "string" - }, - "goodallExampleId": { - "type": "string" - }, - "isOptOut": { - "description": "Whether the document is opted out.", - "type": "boolean" - }, - "isPrompt": { - "type": "boolean" - }, - "lamdaExampleId": { - "type": "string" - }, - "license": { - "type": "string" - }, - "meenaConversationId": { - "type": "string" - }, - "naturalLanguageCode": { - "description": "Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii.", - "type": "string" - }, - "noAttribution": { - "description": "True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available.", - "type": "boolean" - }, - "podcastUtteranceId": { - "type": "string" - }, - "publicationDate": { - "$ref": "GoogleTypeDate" - }, - "qualityScoreExperimentOnly": { - "description": "This field is for opt-out experiment only, MUST never be used during actual production/serving. 
", - "format": "double", - "type": "number" - }, - "repo": { - "description": "Github repository", - "type": "string" - }, - "url": { - "description": "URL of a webdoc", - "type": "string" - }, - "volumeId": { - "type": "string" - }, - "wikipediaArticleTitle": { - "description": "Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset.", - "type": "string" - }, - "youtubeVideoId": { - "description": "The unique video id from Youtube. Example: AkoGsW52Ir0", - "type": "string" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoRecitationResult": { - "description": "The recitation result for one input", - "id": "LanguageLabsAidaTrustRecitationProtoRecitationResult", - "properties": { - "dynamicSegmentResults": { - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - }, - "recitationAction": { - "description": "The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. 
When the given input is not found in any source, the recitation action will not be specified.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "trainingSegmentResults": { - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoSegmentResult": { - "description": "The recitation result for each segment in a given input.", - "id": "LanguageLabsAidaTrustRecitationProtoSegmentResult", - "properties": { - "attributionDataset": { - "description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - 
"LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - "PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - "GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - "CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - 
"GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - "GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - "GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - 
"DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - "DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - "LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - "GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - 
"GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - 
"GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", - "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - "GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - "GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - 
"GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. 
http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Github dataset with license info. 
We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset.", - "Mobile assistant finetune datasets.", - "", - "Genesis fine-tune datasets.", - "Cloud Security fine-tune datasets.", - "", - "", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini 
V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inpsiration." - ], - "type": "string" - }, - "displayAttributionMessage": { - "description": "human-friendly string that contains information from doc_attribution which could be shown by clients", - "type": "string" - }, - "docAttribution": { - "$ref": "LanguageLabsAidaTrustRecitationProtoDocAttribution" - }, - "docOccurrences": { - "description": "number of documents that contained this segment", - "format": "int32", - "type": "integer" - }, - "endIndex": { - "format": "int32", - "type": "integer" - }, - "rawText": { - "description": "The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options.", - "type": "string" - }, - "segmentRecitationAction": { - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "sourceCategory": { - "description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", - "enum": [ - "SOURCE_CATEGORY_UNSPECIFIED", - "SOURCE_CATEGORY_WIKIPEDIA", - "SOURCE_CATEGORY_WEBDOCS", - "SOURCE_CATEGORY_GITHUB", - "SOURCE_CATEGORY_ARXIV", - "SOURCE_CATEGORY_PRIVATE_BOOKS", - "SOURCE_CATEGORY_OTHERS", - "SOURCE_CATEGORY_PUBLIC_BOOKS", - "SOURCE_CATEGORY_GNEWS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "startIndex": { - "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. 
In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoStreamRecitationResult": { - "description": "The recitation result for one stream input", - "id": "LanguageLabsAidaTrustRecitationProtoStreamRecitationResult", - "properties": { - "dynamicSegmentResults": { - "description": "The recitation result against the given dynamic data source.", - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - }, - "fullyCheckedTextIndex": { - "description": "Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation.", - "format": "int32", - "type": "integer" - }, - "recitationAction": { - "description": "The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "trainingSegmentResults": { - "description": "The recitation result against model training data.", - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRecitationContentChunkRecitationCheckResult": { - "description": "Recitation check result for a single content chunk.", - "id": "LearningGenaiRecitationContentChunkRecitationCheckResult", - "properties": { - "imageResult": { - 
"$ref": "LearningGenaiRecitationImageRecitationCheckResult" - }, - "textResult": { - "$ref": "LearningGenaiRecitationRecitationResult" - } - }, - "type": "object" - }, - "LearningGenaiRecitationDocAttribution": { - "description": "The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30", - "id": "LearningGenaiRecitationDocAttribution", - "properties": { - "amarnaId": { - "type": "string" - }, - "arxivId": { - "type": "string" - }, - "author": { - "type": "string" - }, - "bibkey": { - "type": "string" - }, - "biorxivId": { - "description": "ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517", - "type": "string" - }, - "bookTitle": { - "type": "string" - }, - "bookVolumeId": { - "description": "The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. 
Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set.", - "format": "int64", - "type": "string" - }, - "conversationId": { - "type": "string" - }, - "dataset": { - "description": "The dataset this document comes from.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - "LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - "PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - 
"GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - "CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - 
"GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - "GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - "DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - "DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - 
"LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - "GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - 
"GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - "GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", - "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - 
"GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - "GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - 
"GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - 
"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. 
http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "GitHub dataset with license info. We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset", - "Mobile assistant finetune datasets.", - "", - "Genesis fine tuned datasets.", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Cloud Security fine tuned datasets.", - "", - "", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inspiration FT datasets." - ], - "type": "string" - }, - "filepath": { - "type": "string" - }, - "geminiId": { - "type": "string" - }, - "gnewsArticleTitle": { - "type": "string" - }, - "goodallExampleId": { - "type": "string" - }, - "isOptOut": { - "description": "Whether the document is opted out.", - "type": "boolean" - }, - "isPrompt": { - "description": "When true, this attribution came from the user's prompt.", - "type": "boolean" - }, - "lamdaExampleId": { - "type": "string" - }, - "license": { - "type": "string" - }, - "meenaConversationId": { - "type": "string" - }, - "naturalLanguageCode": { - "description": "Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii.", - "type": "string" - }, - "noAttribution": { - "description": "True if this doc has no attribution information available. 
We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available.", - "type": "boolean" - }, - "podcastUtteranceId": { - "type": "string" - }, - "publicationDate": { - "$ref": "GoogleTypeDate" - }, - "qualityScoreExperimentOnly": { - "description": "This field is for opt-out experiment only, MUST never be used during actual production/serving. ", - "format": "double", - "type": "number" - }, - "repo": { - "description": "Github repository", - "type": "string" - }, - "url": { - "description": "URL of a webdoc", - "type": "string" - }, - "volumeId": { - "type": "string" - }, - "wikipediaArticleTitle": { - "description": "Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset.", - "type": "string" - }, - "youtubeVideoId": { - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRecitationImageDocAttribution": { - "description": "Attribution information about the recited image.", - "id": "LearningGenaiRecitationImageDocAttribution", - "properties": { - "datasetName": { - "description": "Unique ID of the image.", - "enum": [ - "IMAGE_DATA_SET_UNSPECIFIED", - "JUNO_SHUTTERSTOCK_V1", - "JUNO_V1_HIPR", - "JUNO_V1_WEBLI_AESTHETICS_V2_4_5", - "JUNO_V1_TIGG_DATA_V1", - "JUNO_V1_PINTEREST_A", - "JUNO_V1_PINTEREST_B", - "JUNO_V1_IMAGEJOINS", - "JUNO_V1_M1", - "JUNO_V1_M2_MISC", - "JUNO_V1_M2_ART", - "JUNO_V1_DEVIANTAR", - "IMAGE_PASSAGE" - ], - "enumDescriptions": [ - "", - "go/tigg-shutterstock", - "go/hipr-in-tigg-notes", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "go/image-passage" - ], - "type": "string" - }, - "stringDocids": { - "description": "Doc ID to identify the 
image. These could be urls of images or amarna id.", - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRecitationImageRecitationCheckResult": { - "id": "LearningGenaiRecitationImageRecitationCheckResult", - "properties": { - "recitationAction": { - "description": "Only has NO_ACTION or BLOCK to start with.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "recitedImages": { - "description": "Images that are similar to the requested image.", - "items": { - "$ref": "LearningGenaiRecitationImageRecitationCheckResultSimilarImage" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRecitationImageRecitationCheckResultSimilarImage": { - "id": "LearningGenaiRecitationImageRecitationCheckResultSimilarImage", - "properties": { - "docAttribution": { - "$ref": "LearningGenaiRecitationImageDocAttribution", - "description": "Attribution information about the image" - }, - "embeddingModel": { - "description": "The memorization embedding model that returned this image", - "enum": [ - "EMBEDDING_MODEL_UNSPECIFIED", - "STARBURST_V4", - "REISIM" - ], - "enumDescriptions": [ - "", - "Starburst V4, 64 float features.", - "Reisim, 128 Byte float feature" - ], - "type": "string" - }, - "imageId": { - "description": "Image ID corresponding of the image corresponding to the score. 
`image_id` serves for debugging purposes and can't be used by clients to retrieve an image.", - "format": "uint64", - "type": "string" - }, - "scores": { - "description": "Similarity score of requested image compared with image in training data.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRecitationMMRecitationCheckResult": { - "description": "Recitation check result for a stream of content chunks (e.g. a model response).", - "id": "LearningGenaiRecitationMMRecitationCheckResult", - "properties": { - "chunkResults": { - "items": { - "$ref": "LearningGenaiRecitationContentChunkRecitationCheckResult" - }, - "type": "array" - }, - "recitationAction": { - "description": "Overall recommended recitation action for the content.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRecitationRecitationResult": { - "description": "The recitation result for one input", - "id": "LearningGenaiRecitationRecitationResult", - "properties": { - "dynamicSegmentResults": { - "items": { - "$ref": "LearningGenaiRecitationSegmentResult" - }, - "type": "array" - }, - "recitationAction": { - "description": "The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. 
When the given input is not found in any source, the recitation action will be NO_ACTION.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "trainingSegmentResults": { - "items": { - "$ref": "LearningGenaiRecitationSegmentResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRecitationSegmentResult": { - "description": "The recitation result for each segment in a given input.", - "id": "LearningGenaiRecitationSegmentResult", - "properties": { - "attributionDataset": { - "description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - "LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - 
"PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - "GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - "CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - 
"GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - "GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - "GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - "DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - 
"DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - "LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - "GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - 
"GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - "GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", 
- "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - "GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - "GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - 
"GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - 
"GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. 
http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "GitHub dataset with license info. 
We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset", - "Mobile assistant finetune datasets.", - "", - "Genesis fine tuned datasets.", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Cloud Security fine tuned datasets.", - "", - "", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"Gemini V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inspiration FT datasets." - ], - "type": "string" - }, - "displayAttributionMessage": { - "description": "human-friendly string that contains information from doc_attribution which could be shown by clients", - "type": "string" - }, - "docAttribution": { - "$ref": "LearningGenaiRecitationDocAttribution" - }, - "docOccurrences": { - "description": "number of documents that contained this segment", - "format": "int32", - "type": "integer" - }, - "endIndex": { - "format": "int32", - "type": "integer" - }, - "rawText": { - "description": "The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options.", - "type": "string" - }, - "segmentRecitationAction": { - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "sourceCategory": { - "description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", - "enum": [ - "SOURCE_CATEGORY_UNSPECIFIED", - "SOURCE_CATEGORY_WIKIPEDIA", - "SOURCE_CATEGORY_WEBDOCS", - "SOURCE_CATEGORY_GITHUB", - "SOURCE_CATEGORY_ARXIV", - "SOURCE_CATEGORY_PRIVATE_BOOKS", - "SOURCE_CATEGORY_OTHERS", - "SOURCE_CATEGORY_PUBLIC_BOOKS", - "SOURCE_CATEGORY_GNEWS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "startIndex": { - "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. 
In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "LearningGenaiRootCalculationType": { - "description": "The type used for final weights calculation.", - "id": "LearningGenaiRootCalculationType", - "properties": { - "scoreType": { - "enum": [ - "TYPE_UNKNOWN", - "TYPE_SAFE", - "TYPE_POLICY", - "TYPE_GENERATION" - ], - "enumDescriptions": [ - "Unknown scorer type.", - "Safety scorer.", - "Policy scorer.", - "Generation scorer." - ], - "type": "string" - }, - "weights": { - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRootClassifierOutput": { - "id": "LearningGenaiRootClassifierOutput", - "properties": { - "ruleOutput": { - "$ref": "LearningGenaiRootRuleOutput", - "deprecated": true, - "description": "If set, this is the output of the first matching rule." - }, - "ruleOutputs": { - "description": "outputs of all matching rule.", - "items": { - "$ref": "LearningGenaiRootRuleOutput" - }, - "type": "array" - }, - "state": { - "$ref": "LearningGenaiRootClassifierState", - "description": "The results of data_providers and metrics." - } - }, - "type": "object" - }, - "LearningGenaiRootClassifierOutputSummary": { - "id": "LearningGenaiRootClassifierOutputSummary", - "properties": { - "metrics": { - "items": { - "$ref": "LearningGenaiRootMetricOutput" - }, - "type": "array" - }, - "ruleOutput": { - "$ref": "LearningGenaiRootRuleOutput", - "deprecated": true, - "description": "Output of the first matching rule." - }, - "ruleOutputs": { - "description": "outputs of all matching rule.", - "items": { - "$ref": "LearningGenaiRootRuleOutput" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootClassifierState": { - "description": "DataProviderOutput and MetricOutput can be saved between calls to the Classifier framework. 
For instance, you can run the query classifier, get outputs from those metrics, then use them in a result classifier as well. Example rule based on this idea: and_rules { rule { metric_name: 'query_safesearch_v2' ... } rule { metric_name: 'response_safesearch_v2' ... } }", - "id": "LearningGenaiRootClassifierState", - "properties": { - "dataProviderOutput": { - "items": { - "$ref": "LearningGenaiRootDataProviderOutput" - }, - "type": "array" - }, - "metricOutput": { - "items": { - "$ref": "LearningGenaiRootMetricOutput" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyChatMetadata": { - "description": "Stores all metadata relating to AIDA DoConversation.", - "id": "LearningGenaiRootCodeyChatMetadata", - "properties": { - "codeLanguage": { - "description": "Indicates the programming language of the code if the message is a code chunk.", - "enum": [ - "UNSPECIFIED", - "ALL", - "TEXT", - "CPP", - "PYTHON", - "KOTLIN", - "JAVA", - "JAVASCRIPT", - "GO", - "R", - "JUPYTER_NOTEBOOK", - "TYPESCRIPT", - "HTML", - "SQL", - "BASH", - "C", - "DART", - "GRADLE", - "GROOVY", - "JAVADOC", - "JSON", - "MAKEFILE", - "MARKDOWN", - "PROTO", - "XML", - "YAML" - ], - "enumDescriptions": [ - "Unspecified Language.", - "All languages.", - "Not code.", - "The most common, well-supported languages. C++ code.", - "Python code.", - "Kotlin code.", - "Java code.", - "JavaScript code.", - "Go code.", - "R code.", - "Jupyter notebook.", - "TypeScript code.", - "HTML code.", - "SQL code.", - "Other languages in alphabetical order. BASH code.", - "C code.", - "Dart code.", - "Gradle code.", - "Groovy code.", - "API documentation.", - "JSON code.", - "Makefile code.", - "Markdown code.", - "Protocol buffer.", - "XML code.", - "YAML code." 
- ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyCheckpoint": { - "description": "Describes a sample at a checkpoint for post-processing.", - "id": "LearningGenaiRootCodeyCheckpoint", - "properties": { - "codeyTruncatorMetadata": { - "$ref": "LearningGenaiRootCodeyTruncatorMetadata", - "description": "Metadata that describes what was truncated at this checkpoint." - }, - "currentSample": { - "description": "Current state of the sample after truncator.", - "type": "string" - }, - "postInferenceStep": { - "description": "Postprocessor run that yielded this checkpoint.", - "enum": [ - "STEP_POST_PROCESSING_STEP_UNSPECIFIED", - "STEP_ORIGINAL_MODEL_OUTPUT", - "STEP_MODEL_OUTPUT_DEDUPLICATION", - "STEP_STOP_SEQUENCE_TRUNCATION", - "STEP_HEURISTIC_TRUNCATION", - "STEP_WALD_TRUNCATION", - "STEP_WHITESPACE_TRUNCATION", - "STEP_FINAL_DEDUPLICATION", - "STEP_TOXICITY_CHECK", - "STEP_RECITATION_CHECK", - "STEP_RETURNED", - "STEP_WALKBACK_CORRECTION", - "STEP_SCORE_THRESHOLDING", - "STEP_MODEL_CONFIG_STOP_SEQUENCE_TRUNCATION", - "STEP_CUSTOM_STOP_SEQUENCE_TRUNCATION", - "STEP_EXPECTED_SAMPLE_SIZE", - "STEP_TREE_TRIM_TRUNCATION" - ], - "enumDeprecated": [ - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false - ], - "enumDescriptions": [ - "", - "Original model outputs as-is.", - "Original model outputs after deduplication.", - "StopSequencePostProcessor.", - "Heuristic SuffixTruncator step.", - "Go service post-processor.", - "Truncate trailing whitespace and filter whitespace-only completions.", - "Deduplicate after all truncations.", - "Toxicity returns true.", - "Recitation causes BLOCK.", - "Return the response to the API.", - "Correcting walkback constraint (samples are dropped if they don't match the prefix constraint).", - "Thresholding samples based on a minimum score.", - "StopSequencePostProcessor.", - "StopSequencePostProcessor.", - "Drop 
extra number of samples that exceeds expected_samples.", - "Truncated by highest end token score." - ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyCompletionMetadata": { - "description": "Stores all metadata relating to Completion.", - "id": "LearningGenaiRootCodeyCompletionMetadata", - "properties": { - "checkpoints": { - "items": { - "$ref": "LearningGenaiRootCodeyCheckpoint" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyGenerationMetadata": { - "description": "Stores all metadata relating to GenerateCode.", - "id": "LearningGenaiRootCodeyGenerationMetadata", - "properties": { - "output": { - "description": "Last state of the sample before getting dropped/returned.", - "type": "string" - }, - "postInferenceStep": { - "description": "Last Codey postprocessing step for this sample before getting dropped/returned.", - "enum": [ - "STEP_POST_PROCESSING_STEP_UNSPECIFIED", - "STEP_ORIGINAL_MODEL_OUTPUT", - "STEP_MODEL_OUTPUT_DEDUPLICATION", - "STEP_STOP_SEQUENCE_TRUNCATION", - "STEP_HEURISTIC_TRUNCATION", - "STEP_WALD_TRUNCATION", - "STEP_WHITESPACE_TRUNCATION", - "STEP_FINAL_DEDUPLICATION", - "STEP_TOXICITY_CHECK", - "STEP_RECITATION_CHECK", - "STEP_RETURNED", - "STEP_WALKBACK_CORRECTION", - "STEP_SCORE_THRESHOLDING", - "STEP_MODEL_CONFIG_STOP_SEQUENCE_TRUNCATION", - "STEP_CUSTOM_STOP_SEQUENCE_TRUNCATION", - "STEP_EXPECTED_SAMPLE_SIZE", - "STEP_TREE_TRIM_TRUNCATION" - ], - "enumDeprecated": [ - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false - ], - "enumDescriptions": [ - "", - "Original model outputs as-is.", - "Original model outputs after deduplication.", - "StopSequencePostProcessor.", - "Heuristic SuffixTruncator step.", - "Go service post-processor.", - "Truncate trailing whitespace and filter whitespace-only completions.", - "Deduplicate after all truncations.", - "Toxicity returns true.", - 
"Recitation causes BLOCK.", - "Return the response to the API.", - "Correcting walkback constraint (samples are dropped if they don't match the prefix constraint).", - "Thresholding samples based on a minimum score.", - "StopSequencePostProcessor.", - "StopSequencePostProcessor.", - "Drop extra number of samples that exceeds expected_samples.", - "Truncated by highest end token score." - ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyOutput": { - "description": "Top-level wrapper used to store all things codey-related.", - "id": "LearningGenaiRootCodeyOutput", - "properties": { - "codeyChatMetadata": { - "$ref": "LearningGenaiRootCodeyChatMetadata" - }, - "codeyCompletionMetadata": { - "$ref": "LearningGenaiRootCodeyCompletionMetadata" - }, - "codeyGenerationMetadata": { - "$ref": "LearningGenaiRootCodeyGenerationMetadata" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyTruncatorMetadata": { - "description": "Metadata describing what was truncated at each checkpoint.", - "id": "LearningGenaiRootCodeyTruncatorMetadata", - "properties": { - "cutoffIndex": { - "description": "Index of the current sample that trims off truncated text.", - "format": "int32", - "type": "integer" - }, - "truncatedText": { - "description": "Text that was truncated at a specific checkpoint.", - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootControlDecodingConfigThreshold": { - "description": "Score threshold for a category.", - "id": "LearningGenaiRootControlDecodingConfigThreshold", - "properties": { - "policy": { - "enum": [ - "UNSPECIFIED", - "DANGEROUS_CONTENT", - "HARASSMENT", - "HATE_SPEECH", - "SEXUALLY_EXPLICIT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "scoreMax": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRootControlDecodingRecord": { - "description": "Holds one control decoding record.", - "id": 
"LearningGenaiRootControlDecodingRecord", - "properties": { - "prefixes": { - "description": "Prefixes feeded into scorer.", - "type": "string" - }, - "scores": { - "description": "Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`.", - "items": { - "$ref": "LearningGenaiRootControlDecodingRecordPolicyScore" - }, - "type": "array" - }, - "suffiexes": { - "description": "Suffixes feeded into scorer.", - "type": "string" - }, - "thresholds": { - "description": "Per policy thresholds from user config.", - "items": { - "$ref": "LearningGenaiRootControlDecodingConfigThreshold" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootControlDecodingRecordPolicyScore": { - "id": "LearningGenaiRootControlDecodingRecordPolicyScore", - "properties": { - "policy": { - "enum": [ - "UNSPECIFIED", - "DANGEROUS_CONTENT", - "HARASSMENT", - "HATE_SPEECH", - "SEXUALLY_EXPLICIT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "score": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRootControlDecodingRecords": { - "id": "LearningGenaiRootControlDecodingRecords", - "properties": { - "records": { - "description": "One ControlDecodingRecord record maps to one rewind.", - "items": { - "$ref": "LearningGenaiRootControlDecodingRecord" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootDataProviderOutput": { - "id": "LearningGenaiRootDataProviderOutput", - "properties": { - "name": { - "type": "string" - }, - "status": { - "$ref": "UtilStatusProto", - "description": "If set, this DataProvider failed and this is the error message." 
- } - }, - "type": "object" - }, - "LearningGenaiRootFilterMetadata": { - "id": "LearningGenaiRootFilterMetadata", - "properties": { - "confidence": { - "description": "Filter confidence.", - "enum": [ - "FILTER_CONFIDENCE_UNKNOWN", - "FILTER_CONFIDENCE_VERY_LOW", - "FILTER_CONFIDENCE_LOW", - "FILTER_CONFIDENCE_MEDIUM", - "FILTER_CONFIDENCE_HIGH", - "FILTER_CONFIDENCE_VERY_HIGH" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "debugInfo": { - "$ref": "LearningGenaiRootFilterMetadataFilterDebugInfo", - "description": "Debug info for the message." - }, - "fallback": { - "description": "A fallback message chosen by the applied filter.", - "type": "string" - }, - "info": { - "description": "Additional info for the filter.", - "type": "string" - }, - "name": { - "description": "Name of the filter that triggered.", - "type": "string" - }, - "reason": { - "description": "Filter reason.", - "enum": [ - "FILTER_REASON_UNKNOWN", - "FILTER_REASON_NOT_FILTERED", - "FILTER_REASON_SENSITIVE", - "FILTER_REASON_RECITATION", - "FILTER_REASON_LANGUAGE", - "FILTER_REASON_TAKEDOWN", - "FILTER_REASON_CLASSIFIER", - "FILTER_REASON_EMPTY_RESPONSE", - "FILTER_REASON_SIMILARITY_TAKEDOWN", - "FILTER_REASON_UNSAFE", - "FILTER_REASON_PAIRWISE_CLASSIFIER", - "FILTER_REASON_CODEY", - "FILTER_REASON_URL", - "FILTER_REASON_EMAIL", - "FILTER_REASON_SAFETY_CAT", - "FILTER_REASON_REQUEST_RESPONSE_TAKEDOWN", - "FILTER_REASON_RAI_PQC", - "FILTER_REASON_ATLAS", - "FILTER_REASON_RAI_CSAM", - "FILTER_REASON_RAI_FRINGE", - "FILTER_REASON_RAI_SPII", - "FILTER_REASON_RAI_IMAGE_VIOLENCE", - "FILTER_REASON_RAI_IMAGE_PORN", - "FILTER_REASON_RAI_IMAGE_CSAM", - "FILTER_REASON_RAI_IMAGE_PEDO", - "FILTER_REASON_RAI_IMAGE_CHILD", - "FILTER_REASON_RAI_VIDEO_FRAME_VIOLENCE", - "FILTER_REASON_RAI_VIDEO_FRAME_PORN", - "FILTER_REASON_RAI_VIDEO_FRAME_CSAM", - "FILTER_REASON_RAI_VIDEO_FRAME_PEDO", - "FILTER_REASON_RAI_VIDEO_FRAME_CHILD", - 
"FILTER_REASON_RAI_CONTEXTUAL_DANGEROUS", - "FILTER_REASON_RAI_GRAIL_TEXT", - "FILTER_REASON_RAI_GRAIL_IMAGE", - "FILTER_REASON_RAI_SAFETYCAT", - "FILTER_REASON_TOXICITY", - "FILTER_REASON_ATLAS_PRICING", - "FILTER_REASON_ATLAS_BILLING", - "FILTER_REASON_ATLAS_NON_ENGLISH_QUESTION", - "FILTER_REASON_ATLAS_NOT_RELATED_TO_GCP", - "FILTER_REASON_ATLAS_AWS_AZURE_RELATED", - "FILTER_REASON_XAI", - "FILTER_CONTROL_DECODING" - ], - "enumDescriptions": [ - "Unknown filter reason.", - "Input not filtered.", - "Sensitive content.", - "Recited content.", - "Language filtering", - "Takedown policy", - "Classifier Module", - "Empty response message.", - "Similarity takedown.", - "Unsafe responses from scorers.", - "Pairwise classifier.", - "Codey Filter.", - "URLs Filter.", - "Emails Filter.", - "SafetyCat filter.", - "Request Response takedown.", - "RAI Filter.", - "Atlas specific topic filter", - "RAI Filter.", - "RAI Filter.", - "RAI Filter.", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "Grail Text", - "Grail Image", - "SafetyCat.", - "Toxic content.", - "Atlas specific topic filter for pricing questions.", - "Atlas specific topic filter for billing questions.", - "Atlas specific topic filter for non english questions.", - "Atlas specific topic filter for non GCP questions.", - "Atlas specific topic filter aws/azure related questions.", - "Right now we don't do any filtering for XAI. Adding this just want to differentiatiat the XAI output metadata from other SafetyCat RAI output metadata", - "The response are filtered because it could not pass the control decoding thresholds and the maximum rewind attempts is reached." 
- ], - "type": "string" - }, - "text": { - "description": "The input query or generated response that is getting filtered.", - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootFilterMetadataFilterDebugInfo": { - "id": "LearningGenaiRootFilterMetadataFilterDebugInfo", - "properties": { - "classifierOutput": { - "$ref": "LearningGenaiRootClassifierOutput" - }, - "defaultMetadata": { - "type": "string" - }, - "languageFilterResult": { - "$ref": "LearningGenaiRootLanguageFilterResult" - }, - "raiOutput": { - "$ref": "LearningGenaiRootRAIOutput", - "description": "Safety filter output information for LLM Root RAI harm check." - }, - "raiResult": { - "$ref": "CloudAiNlLlmProtoServiceRaiResult" - }, - "raiSignal": { - "$ref": "CloudAiNlLlmProtoServiceRaiSignal", - "deprecated": true - }, - "records": { - "$ref": "LearningGenaiRootControlDecodingRecords", - "description": "Number of rewinds by controlled decoding." - }, - "streamRecitationResult": { - "$ref": "LanguageLabsAidaTrustRecitationProtoStreamRecitationResult", - "deprecated": true - }, - "takedownResult": { - "$ref": "LearningGenaiRootTakedownResult" - }, - "toxicityResult": { - "$ref": "LearningGenaiRootToxicityResult" + "value": { + "description": "Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. 
`string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'.", + "readOnly": true, + "type": "any" } }, "type": "object" }, - "LearningGenaiRootGroundingMetadata": { - "id": "LearningGenaiRootGroundingMetadata", + "GoogleCloudAiplatformV1TunedModel": { + "description": "The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob.", + "id": "GoogleCloudAiplatformV1TunedModel", "properties": { - "citations": { - "items": { - "$ref": "LearningGenaiRootGroundingMetadataCitation" - }, - "type": "array" - }, - "groundingCancelled": { - "description": "True if grounding is cancelled, for example, no facts being retrieved.", - "type": "boolean" + "endpoint": { + "description": "Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.", + "readOnly": true, + "type": "string" }, - "searchQueries": { - "items": { - "type": "string" - }, - "type": "array" + "model": { + "description": "Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}`.", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "LearningGenaiRootGroundingMetadataCitation": { - "id": "LearningGenaiRootGroundingMetadataCitation", + "GoogleCloudAiplatformV1TuningDataStats": { + "description": "The tuning data statistic values for TuningJob.", + "id": "GoogleCloudAiplatformV1TuningDataStats", "properties": { - "endIndex": { - "description": "Index in the prediction output where the citation ends (exclusive). Must be > start_index and <= len(output).", - "format": "int32", - "type": "integer" - }, - "factIndex": { - "description": "Index of the fact supporting this claim. Should be within the range of the `world_facts` in the GenerateResponse.", - "format": "int32", - "type": "integer" - }, - "score": { - "description": "Confidence score of this entailment. 
Value is [0,1] with 1 is the most confidence.", - "format": "double", - "type": "number" - }, - "startIndex": { - "description": "Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index.", - "format": "int32", - "type": "integer" + "supervisedTuningDataStats": { + "$ref": "GoogleCloudAiplatformV1SupervisedTuningDataStats", + "description": "The SFT Tuning data stats." } }, "type": "object" }, - "LearningGenaiRootHarm": { - "id": "LearningGenaiRootHarm", + "GoogleCloudAiplatformV1TuningJob": { + "description": "Represents a TuningJob that runs with Google owned models.", + "id": "GoogleCloudAiplatformV1TuningJob", "properties": { - "contextualDangerous": { - "description": "Please do not use, this is still under development.", - "type": "boolean" - }, - "csam": { - "type": "boolean" - }, - "fringe": { - "type": "boolean" - }, - "grailImageHarmType": { - "$ref": "LearningGenaiRootHarmGrailImageHarmType" + "baseModel": { + "description": "The base model that is being tuned, e.g., \"gemini-1.0-pro-002\".", + "type": "string" }, - "grailTextHarmType": { - "$ref": "LearningGenaiRootHarmGrailTextHarmType" + "createTime": { + "description": "Output only. Time when the TuningJob was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "imageChild": { - "type": "boolean" + "description": { + "description": "Optional. The description of the TuningJob.", + "type": "string" }, - "imageCsam": { - "type": "boolean" + "encryptionSpec": { + "$ref": "GoogleCloudAiplatformV1EncryptionSpec", + "description": "Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key." }, - "imagePedo": { - "type": "boolean" + "endTime": { + "description": "Output only. 
Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "imagePorn": { - "description": "Image signals", - "type": "boolean" + "error": { + "$ref": "GoogleRpcStatus", + "description": "Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.", + "readOnly": true }, - "imageViolence": { - "type": "boolean" + "experiment": { + "description": "Output only. The Experiment associated with this TuningJob.", + "readOnly": true, + "type": "string" }, - "pqc": { - "type": "boolean" + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.", + "type": "object" }, - "safetycat": { - "$ref": "LearningGenaiRootHarmSafetyCatCategories" + "name": { + "description": "Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}`", + "readOnly": true, + "type": "string" }, - "spii": { - "$ref": "LearningGenaiRootHarmSpiiFilter", - "description": "Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for \"POSSIBLE\" is 3 / 5 = 0.6 ." + "startTime": { + "description": "Output only. 
Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "threshold": { - "format": "double", - "type": "number" + "state": { + "description": "Output only. The detailed state of the job.", + "enum": [ + "JOB_STATE_UNSPECIFIED", + "JOB_STATE_QUEUED", + "JOB_STATE_PENDING", + "JOB_STATE_RUNNING", + "JOB_STATE_SUCCEEDED", + "JOB_STATE_FAILED", + "JOB_STATE_CANCELLING", + "JOB_STATE_CANCELLED", + "JOB_STATE_PAUSED", + "JOB_STATE_EXPIRED", + "JOB_STATE_UPDATING", + "JOB_STATE_PARTIALLY_SUCCEEDED" + ], + "enumDescriptions": [ + "The job state is unspecified.", + "The job has been just created or resumed and processing has not yet begun.", + "The service is preparing to run the job.", + "The job is in progress.", + "The job completed successfully.", + "The job failed.", + "The job is being cancelled. From this state the job may only go to either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.", + "The job has been cancelled.", + "The job has been stopped, and can be resumed.", + "The job has expired.", + "The job is being updated. Only jobs in the `RUNNING` state can be updated. After updating, the job goes back to the `RUNNING` state.", + "The job is partially succeeded, some results may be missing due to errors." + ], + "readOnly": true, + "type": "string" }, - "videoFrameChild": { - "type": "boolean" + "supervisedTuningSpec": { + "$ref": "GoogleCloudAiplatformV1SupervisedTuningSpec", + "description": "Tuning Spec for Supervised Fine Tuning." }, - "videoFrameCsam": { - "type": "boolean" + "tunedModel": { + "$ref": "GoogleCloudAiplatformV1TunedModel", + "description": "Output only. The tuned model resources assiociated with this TuningJob.", + "readOnly": true }, - "videoFramePedo": { - "type": "boolean" + "tunedModelDisplayName": { + "description": "Optional. The display name of the TunedModel. 
The name can be up to 128 characters long and can consist of any UTF-8 characters.", + "type": "string" }, - "videoFramePorn": { - "description": "Video frame signals", - "type": "boolean" + "tuningDataStats": { + "$ref": "GoogleCloudAiplatformV1TuningDataStats", + "description": "Output only. The tuning data statistics associated with this TuningJob.", + "readOnly": true }, - "videoFrameViolence": { - "type": "boolean" + "updateTime": { + "description": "Output only. Time when the TuningJob was most recently updated.", + "format": "google-datetime", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "LearningGenaiRootHarmGrailImageHarmType": { - "description": "Harm type for images", - "id": "LearningGenaiRootHarmGrailImageHarmType", + "GoogleCloudAiplatformV1UndeployIndexOperationMetadata": { + "description": "Runtime operation information for IndexEndpointService.UndeployIndex.", + "id": "GoogleCloudAiplatformV1UndeployIndexOperationMetadata", "properties": { - "imageHarmType": { - "items": { - "enum": [ - "IMAGE_HARM_TYPE_UNSPECIFIED", - "IMAGE_HARM_TYPE_PORN", - "IMAGE_HARM_TYPE_VIOLENCE", - "IMAGE_HARM_TYPE_CSAI", - "IMAGE_HARM_TYPE_PEDO", - "IMAGE_HARM_TYPE_MINORS", - "IMAGE_HARM_TYPE_DANGEROUS", - "IMAGE_HARM_TYPE_MEDICAL", - "IMAGE_HARM_TYPE_RACY", - "IMAGE_HARM_TYPE_OBSCENE", - "IMAGE_HARM_TYPE_MINOR_PRESENCE", - "IMAGE_HARM_TYPE_GENERATIVE_MINOR_PRESENCE", - "IMAGE_HARM_TYPE_GENERATIVE_REALISTIC_VISIBLE_FACE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "type": "array" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." 
} }, "type": "object" }, - "LearningGenaiRootHarmGrailTextHarmType": { - "description": "Harm type for text", - "id": "LearningGenaiRootHarmGrailTextHarmType", + "GoogleCloudAiplatformV1UndeployIndexRequest": { + "description": "Request message for IndexEndpointService.UndeployIndex.", + "id": "GoogleCloudAiplatformV1UndeployIndexRequest", "properties": { - "harmType": { - "items": { - "enum": [ - "HARM_TYPE_UNSPECIFIED", - "HARM_TYPE_HATE", - "HARM_TYPE_TOXICITY", - "HARM_TYPE_VIOLENCE", - "HARM_TYPE_CSAI", - "HARM_TYPE_SEXUAL", - "HARM_TYPE_FRINGE", - "HARM_TYPE_POLITICAL", - "HARM_TYPE_MEMORIZATION", - "HARM_TYPE_SPII", - "HARM_TYPE_NEW_DANGEROUS", - "HARM_TYPE_MEDICAL", - "HARM_TYPE_HARASSMENT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "New definition of dangerous.", - "", - "" - ], - "type": "string" - }, - "type": "array" + "deployedIndexId": { + "description": "Required. The ID of the DeployedIndex to be undeployed from the IndexEndpoint.", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootHarmSafetyCatCategories": { - "description": "LINT.ThenChange(//depot/google3/learning/genai/root/util/classifier/backends/grail/grail.cc)", - "id": "LearningGenaiRootHarmSafetyCatCategories", - "properties": { - "categories": { - "items": { - "enum": [ - "SAFETYCAT_CATEGORY_UNSPECIFIED", - "TOXICITY", - "OBSCENE", - "SEXUAL", - "INSULT", - "IDENTITY_HATE", - "DEATH_HARM_TRAGEDY", - "VIOLENCE_ABUSE", - "FIREARMS_WEAPONS", - "PUBLIC_SAFETY", - "HEALTH", - "RELIGION_BELIEF", - "DRUGS", - "WAR_CONFLICT", - "POLITICS", - "FINANCE", - "LEGAL", - "DANGEROUS", - "DANGEROUS_SEVERITY", - "HARASSMENT_SEVERITY", - "HATE_SEVERITY", - "SEXUAL_SEVERITY" - ], - "enumDescriptions": [ - "", - "SafetyCat categories.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Following categories are only supported in SAFETY_CAT_TEXT_V3_PAX model", - "", - "", - "", - "" - ], - "type": "string" 
- }, - "type": "array" - } - }, + "GoogleCloudAiplatformV1UndeployIndexResponse": { + "description": "Response message for IndexEndpointService.UndeployIndex.", + "id": "GoogleCloudAiplatformV1UndeployIndexResponse", + "properties": {}, "type": "object" }, - "LearningGenaiRootHarmSpiiFilter": { - "description": "LINT.IfChange", - "id": "LearningGenaiRootHarmSpiiFilter", + "GoogleCloudAiplatformV1UndeployModelOperationMetadata": { + "description": "Runtime operation information for EndpointService.UndeployModel.", + "id": "GoogleCloudAiplatformV1UndeployModelOperationMetadata", "properties": { - "usBankRoutingMicr": { - "type": "boolean" - }, - "usEmployerIdentificationNumber": { - "type": "boolean" - }, - "usSocialSecurityNumber": { - "type": "boolean" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." } }, "type": "object" }, - "LearningGenaiRootInternalMetadata": { - "id": "LearningGenaiRootInternalMetadata", + "GoogleCloudAiplatformV1UndeployModelRequest": { + "description": "Request message for EndpointService.UndeployModel.", + "id": "GoogleCloudAiplatformV1UndeployModelRequest", "properties": { - "scoredTokens": { - "items": { - "$ref": "LearningGenaiRootScoredToken" + "deployedModelId": { + "description": "Required. The ID of the DeployedModel to be undeployed from the Endpoint.", + "type": "string" + }, + "trafficSplit": { + "additionalProperties": { + "format": "int32", + "type": "integer" }, - "type": "array" + "description": "If this field is provided, then the Endpoint's traffic_split will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. 
A DeployedModel will be successfully undeployed only if it doesn't have any traffic assigned to it when this method executes, or if this field unassigns any traffic to it.", + "type": "object" } }, "type": "object" }, - "LearningGenaiRootLanguageFilterResult": { - "id": "LearningGenaiRootLanguageFilterResult", + "GoogleCloudAiplatformV1UndeployModelResponse": { + "description": "Response message for EndpointService.UndeployModel.", + "id": "GoogleCloudAiplatformV1UndeployModelResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1UnmanagedContainerModel": { + "description": "Contains model information necessary to perform batch prediction without requiring a full model import.", + "id": "GoogleCloudAiplatformV1UnmanagedContainerModel", "properties": { - "allowed": { - "description": "False when query or response should be filtered out due to unsupported language.", - "type": "boolean" - }, - "detectedLanguage": { - "description": "Language of the query or response.", + "artifactUri": { + "description": "The path to the directory containing the Model artifact and any of its supporting files.", "type": "string" }, - "detectedLanguageProbability": { - "description": "Probability of the language predicted as returned by LangID.", - "format": "float", - "type": "number" + "containerSpec": { + "$ref": "GoogleCloudAiplatformV1ModelContainerSpec", + "description": "Input only. The specification of the container that is to be used when deploying this Model." 
+ }, + "predictSchemata": { + "$ref": "GoogleCloudAiplatformV1PredictSchemata", + "description": "Contains the schemata used in Model's predictions and explanations" } }, "type": "object" }, - "LearningGenaiRootMetricOutput": { - "id": "LearningGenaiRootMetricOutput", + "GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata": { + "description": "Runtime operation information for UpdateDeploymentResourcePool method.", + "id": "GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata", "properties": { - "debug": { - "type": "string" - }, - "name": { - "description": "Name of the metric.", - "type": "string" - }, - "numericValue": { - "format": "double", - "type": "number" - }, - "status": { - "$ref": "UtilStatusProto" - }, - "stringValue": { - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." } }, "type": "object" }, - "LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata": { - "id": "LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata", + "GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata": { + "description": "Runtime operation information for ModelService.UpdateExplanationDataset.", + "id": "GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata", "properties": { - "factRetrievalMillisecondsByProvider": { - "additionalProperties": { - "format": "int64", - "type": "string" - }, - "description": "Latency spent on fact retrievals. There might be multiple retrievals from different fact providers.", - "type": "object" - }, - "prompt2queryMilliseconds": { - "description": "Latency spent on prompt2query. 
The procedure generates a search-friendly query given the original prompt.", - "format": "int64", - "type": "string" - }, - "retrievalAugmentMilliseconds": { - "description": "Latency if use GroundedGeneration service for the whole retrieval & augmentation.", - "format": "int64", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The common part of the operation metadata." } }, "type": "object" }, - "LearningGenaiRootRAIOutput": { - "description": "This is per harm.", - "id": "LearningGenaiRootRAIOutput", + "GoogleCloudAiplatformV1UpdateExplanationDatasetRequest": { + "description": "Request message for ModelService.UpdateExplanationDataset.", + "id": "GoogleCloudAiplatformV1UpdateExplanationDatasetRequest", "properties": { - "allowed": { - "type": "boolean" - }, - "harm": { - "$ref": "LearningGenaiRootHarm" - }, - "name": { - "type": "string" - }, - "score": { - "format": "double", - "type": "number" + "examples": { + "$ref": "GoogleCloudAiplatformV1Examples", + "description": "The example config containing the location of the dataset." } }, "type": "object" }, - "LearningGenaiRootRegexTakedownResult": { - "id": "LearningGenaiRootRegexTakedownResult", + "GoogleCloudAiplatformV1UpdateExplanationDatasetResponse": { + "description": "Response message of ModelService.UpdateExplanationDataset operation.", + "id": "GoogleCloudAiplatformV1UpdateExplanationDatasetResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata": { + "description": "Details of operations that perform update FeatureGroup.", + "id": "GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata", "properties": { - "allowed": { - "description": "False when query or response should be taken down due to match with a blocked regex, true otherwise.", - "type": "boolean" - }, - "takedownRegex": { - "description": "Regex used to decide that query or response should be taken down. 
Empty when query or response is kept.", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for FeatureGroup." } }, "type": "object" }, - "LearningGenaiRootRequestMetrics": { - "id": "LearningGenaiRootRequestMetrics", + "GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata": { + "description": "Details of operations that perform update FeatureOnlineStore.", + "id": "GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata", "properties": { - "audioMetrics": { - "$ref": "LearningGenaiRootRequestMetricsAudioMetrics", - "description": "Metrics for audio samples in the request." - }, - "imageMetrics": { - "$ref": "LearningGenaiRootRequestMetricsImageMetrics", - "description": "Metrics for image samples in the request." - }, - "textTokenCount": { - "description": "Number of text tokens extracted from the request.", - "format": "int32", - "type": "integer" - }, - "totalTokenCount": { - "description": "Total number of tokens in the request.", - "format": "int32", - "type": "integer" - }, - "videoMetrics": { - "$ref": "LearningGenaiRootRequestMetricsVideoMetrics", - "description": "Metrics for video samples in the request." + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for FeatureOnlineStore." 
} }, "type": "object" }, - "LearningGenaiRootRequestMetricsAudioMetrics": { - "id": "LearningGenaiRootRequestMetricsAudioMetrics", + "GoogleCloudAiplatformV1UpdateFeatureOperationMetadata": { + "description": "Details of operations that perform update Feature.", + "id": "GoogleCloudAiplatformV1UpdateFeatureOperationMetadata", "properties": { - "audioDuration": { - "description": "Duration of the audio sample in seconds.", - "format": "google-duration", - "type": "string" - }, - "audioTokenCount": { - "description": "Number of tokens derived directly from audio data.", - "format": "int32", - "type": "integer" - }, - "numAudioFrames": { - "description": "Number of audio frames in the audio.", - "format": "int32", - "type": "integer" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for Feature Update." } }, "type": "object" }, - "LearningGenaiRootRequestMetricsImageMetrics": { - "id": "LearningGenaiRootRequestMetricsImageMetrics", + "GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata": { + "description": "Details of operations that perform update FeatureView.", + "id": "GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata", "properties": { - "imageTokenCount": { - "description": "Number of tokens extracted from image bytes.", - "format": "int32", - "type": "integer" - }, - "numImages": { - "description": "Number of images in the request.", - "format": "int32", - "type": "integer" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for FeatureView Update." 
} }, "type": "object" }, - "LearningGenaiRootRequestMetricsVideoMetrics": { - "id": "LearningGenaiRootRequestMetricsVideoMetrics", + "GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata": { + "description": "Details of operations that perform update Featurestore.", + "id": "GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata", "properties": { - "audioSample": { - "$ref": "LearningGenaiRootRequestMetricsAudioMetrics", - "description": "Metrics associated with audio sample in the video." - }, - "numVideoFrames": { - "description": "Number of video frames in the video.", - "format": "int32", - "type": "integer" - }, - "videoDuration": { - "description": "Duration of the video sample in seconds.", - "format": "google-duration", - "type": "string" - }, - "videoFramesTokenCount": { - "description": "Number of tokens extracted from video frames.", - "format": "int32", - "type": "integer" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for Featurestore." } }, "type": "object" }, - "LearningGenaiRootRequestResponseTakedownResult": { - "id": "LearningGenaiRootRequestResponseTakedownResult", + "GoogleCloudAiplatformV1UpdateIndexOperationMetadata": { + "description": "Runtime operation information for IndexService.UpdateIndex.", + "id": "GoogleCloudAiplatformV1UpdateIndexOperationMetadata", "properties": { - "allowed": { - "description": "False when response has to be taken down per above config.", - "type": "boolean" - }, - "requestTakedownRegex": { - "description": "Regex used to match the request.", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." }, - "responseTakedownRegex": { - "description": "Regex used to decide that response should be taken down. 
Empty when response is kept.", - "type": "string" + "nearestNeighborSearchOperationMetadata": { + "$ref": "GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata", + "description": "The operation metadata with regard to Matching Engine Index operation." } }, "type": "object" }, - "LearningGenaiRootRoutingDecision": { - "description": "Holds the final routing decision, by storing the model_config_id. And individual scores each model got.", - "id": "LearningGenaiRootRoutingDecision", + "GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata": { + "description": "Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob.", + "id": "GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata", "properties": { - "metadata": { - "$ref": "LearningGenaiRootRoutingDecisionMetadata" - }, - "modelConfigId": { - "description": "The selected model to route traffic to.", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadata": { - "description": "Debug metadata about the routing decision.", - "id": "LearningGenaiRootRoutingDecisionMetadata", + "GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata": { + "description": "Details of operations that perform update PersistentResource.", + "id": "GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata", "properties": { - "scoreBasedRoutingMetadata": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataScoreBased" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for PersistentResource." 
}, - "tokenLengthBasedRoutingMetadata": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBased" + "progressMessage": { + "description": "Progress Message for Update LRO", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataScoreBased": { - "description": "If we are routing using scored based configuration, then the metadata about that is available in this proto.", - "id": "LearningGenaiRootRoutingDecisionMetadataScoreBased", + "GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata": { + "description": "Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool.", + "id": "GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata", "properties": { - "matchedRule": { - "$ref": "LearningGenaiRootScoreBasedRoutingConfigRule", - "description": "The rule that was matched." - }, - "score": { - "$ref": "LearningGenaiRootScore", - "description": "The score that was generated by the router i.e. the model." + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." }, - "usedDefaultFallback": { - "description": "No rules were matched & therefore used the default fallback.", - "type": "boolean" + "specialistPool": { + "description": "Output only. The name of the SpecialistPool to which the specialists are being added. 
Format: `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataTokenLengthBased": { - "id": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBased", + "GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata": { + "description": "Details of operations that perform update Tensorboard.", + "id": "GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata", "properties": { - "modelInputTokenMetadata": { - "items": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata" - }, - "type": "array" - }, - "modelMaxTokenMetadata": { - "items": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata" - }, - "type": "array" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "Operation metadata for Tensorboard." } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata": { - "id": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata", + "GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata": { + "description": "Metadata information for NotebookService.UpgradeNotebookRuntime.", + "id": "GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata", "properties": { - "computedInputTokenLength": { - "description": "The length computed by backends using the formatter & tokenizer specific to the model", - "format": "int32", - "type": "integer" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The operation generic information." 
}, - "modelId": { + "progressMessage": { + "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", "type": "string" - }, - "pickedAsFallback": { - "description": "If true, the model was selected as a fallback, since no model met requirements.", - "type": "boolean" - }, - "selected": { - "description": "If true, the model was selected since it met the requriements.", - "type": "boolean" } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata": { - "id": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata", + "GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest": { + "description": "Request message for NotebookService.UpgradeNotebookRuntime.", + "id": "GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1UploadModelOperationMetadata": { + "description": "Details of ModelService.UploadModel operation.", + "id": "GoogleCloudAiplatformV1UploadModelOperationMetadata", "properties": { - "maxNumInputTokens": { - "format": "int32", - "type": "integer" - }, - "maxNumOutputTokens": { - "format": "int32", - "type": "integer" - }, - "modelId": { - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1GenericOperationMetadata", + "description": "The common part of the operation metadata." } }, "type": "object" }, - "LearningGenaiRootRuleOutput": { - "id": "LearningGenaiRootRuleOutput", + "GoogleCloudAiplatformV1UploadModelRequest": { + "description": "Request message for ModelService.UploadModel.", + "id": "GoogleCloudAiplatformV1UploadModelRequest", "properties": { - "decision": { - "enum": [ - "NO_MATCH", - "MATCH" - ], - "enumDescriptions": [ - "This rule was not matched. When used in a ClassifierOutput, this means that no rules were matched.", - "This is a generic \"match\" message, indicating that a rule was triggered. 
Usually you would use this for a categorization classifier." - ], + "model": { + "$ref": "GoogleCloudAiplatformV1Model", + "description": "Required. The Model to create." + }, + "modelId": { + "description": "Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen.", "type": "string" }, - "name": { + "parentModel": { + "description": "Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version.", + "type": "string" + }, + "serviceAccount": { + "description": "Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.).", "type": "string" } }, "type": "object" }, - "LearningGenaiRootScore": { - "id": "LearningGenaiRootScore", + "GoogleCloudAiplatformV1UploadModelResponse": { + "description": "Response message of ModelService.UploadModel operation.", + "id": "GoogleCloudAiplatformV1UploadModelResponse", "properties": { - "calculationType": { - "$ref": "LearningGenaiRootCalculationType" - }, - "internalMetadata": { - "$ref": "LearningGenaiRootInternalMetadata", - "description": "The internal_metadata is intended to be used by internal processors and will be cleared before returns." 
- }, - "thresholdType": { - "$ref": "LearningGenaiRootThresholdType" - }, - "tokensAndLogprobPerDecodingStep": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStep", - "description": "Top candidate tokens and log probabilities at each decoding step." + "model": { + "description": "The name of the uploaded Model resource. Format: `projects/{project}/locations/{location}/models/{model}`", + "type": "string" }, - "value": { - "format": "double", - "type": "number" + "modelVersionId": { + "description": "Output only. The version ID of the model that is uploaded.", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "LearningGenaiRootScoreBasedRoutingConfigRule": { - "id": "LearningGenaiRootScoreBasedRoutingConfigRule", + "GoogleCloudAiplatformV1UpsertDatapointsRequest": { + "description": "Request message for IndexService.UpsertDatapoints", + "id": "GoogleCloudAiplatformV1UpsertDatapointsRequest", "properties": { - "equalOrGreaterThan": { - "$ref": "LearningGenaiRootScore", - "description": "NOTE: Hardest examples have smaller values in their routing scores." - }, - "lessThan": { - "$ref": "LearningGenaiRootScore" + "datapoints": { + "description": "A list of datapoints to be created/updated.", + "items": { + "$ref": "GoogleCloudAiplatformV1IndexDatapoint" + }, + "type": "array" }, - "modelConfigId": { - "description": "This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig.", + "updateMask": { + "description": "Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. 
Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts.", + "format": "google-fieldmask", "type": "string" } }, "type": "object" }, - "LearningGenaiRootScoredSimilarityTakedownPhrase": { - "description": "Proto containing the results from the Universal Sentence Encoder / Other models", - "id": "LearningGenaiRootScoredSimilarityTakedownPhrase", + "GoogleCloudAiplatformV1UpsertDatapointsResponse": { + "description": "Response message for IndexService.UpsertDatapoints", + "id": "GoogleCloudAiplatformV1UpsertDatapointsResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1UserActionReference": { + "description": "References an API call. It contains more information about long running operation and Jobs that are triggered by the API call.", + "id": "GoogleCloudAiplatformV1UserActionReference", "properties": { - "phrase": { - "$ref": "LearningGenaiRootSimilarityTakedownPhrase" + "dataLabelingJob": { + "description": "For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`", + "type": "string" }, - "similarityScore": { - "format": "float", - "type": "number" + "method": { + "description": "The method name of the API RPC call. For example, \"/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset\"", + "type": "string" + }, + "operation": { + "description": "For API calls that return a long running operation. Resource name of the long running operation. 
Format: `projects/{project}/locations/{location}/operations/{operation}`", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootScoredToken": { - "description": "A token with its own score.", - "id": "LearningGenaiRootScoredToken", + "GoogleCloudAiplatformV1Value": { + "description": "Value is the value of the field.", + "id": "GoogleCloudAiplatformV1Value", "properties": { - "endTokenScore": { - "description": "Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459", - "format": "float", + "doubleValue": { + "description": "A double value.", + "format": "double", "type": "number" }, - "score": { - "description": "Each score is the logprob for the token in model response.", - "format": "float", - "type": "number" + "intValue": { + "description": "An integer value.", + "format": "int64", + "type": "string" }, - "token": { + "stringValue": { + "description": "A string value.", "type": "string" } }, "type": "object" }, - "LearningGenaiRootSimilarityTakedownPhrase": { - "description": "Each SimilarityTakedownPhrase treats a logical group of blocked and allowed phrases together along with a corresponding punt If the closest matching response is of the allowed type, we allow the response If the closest matching response is of the blocked type, we block the response. eg: Blocked phrase - \"All lives matter\"", - "id": "LearningGenaiRootSimilarityTakedownPhrase", + "GoogleCloudAiplatformV1VertexAISearch": { + "description": "Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation", + "id": "GoogleCloudAiplatformV1VertexAISearch", "properties": { - "blockedPhrase": { + "datastore": { + "description": "Required. Fully-qualified Vertex AI Search's datastore resource ID. 
Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", "type": "string" } }, "type": "object" }, - "LearningGenaiRootSimilarityTakedownResult": { - "id": "LearningGenaiRootSimilarityTakedownResult", + "GoogleCloudAiplatformV1VideoMetadata": { + "description": "Metadata describes the input video content.", + "id": "GoogleCloudAiplatformV1VideoMetadata", "properties": { - "allowed": { - "description": "False when query or response should be taken down by any of the takedown rules, true otherwise.", - "type": "boolean" + "endOffset": { + "description": "Optional. The end offset of the video.", + "format": "google-duration", + "type": "string" }, - "scoredPhrases": { - "description": "List of similar phrases with score. Set only if allowed=false.", - "items": { - "$ref": "LearningGenaiRootScoredSimilarityTakedownPhrase" - }, - "type": "array" + "startOffset": { + "description": "Optional. The start offset of the video.", + "format": "google-duration", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootTakedownResult": { - "id": "LearningGenaiRootTakedownResult", + "GoogleCloudAiplatformV1WorkerPoolSpec": { + "description": "Represents the spec of a worker pool in a job.", + "id": "GoogleCloudAiplatformV1WorkerPoolSpec", "properties": { - "allowed": { - "description": "False when query or response should be taken down by any of the takedown rules, true otherwise.", - "type": "boolean" + "containerSpec": { + "$ref": "GoogleCloudAiplatformV1ContainerSpec", + "description": "The custom container task." + }, + "diskSpec": { + "$ref": "GoogleCloudAiplatformV1DiskSpec", + "description": "Disk spec." + }, + "machineSpec": { + "$ref": "GoogleCloudAiplatformV1MachineSpec", + "description": "Optional. Immutable. The specification of a single machine." }, - "regexTakedownResult": { - "$ref": "LearningGenaiRootRegexTakedownResult" + "nfsMounts": { + "description": "Optional. 
List of NFS mount spec.", + "items": { + "$ref": "GoogleCloudAiplatformV1NfsMount" + }, + "type": "array" }, - "requestResponseTakedownResult": { - "$ref": "LearningGenaiRootRequestResponseTakedownResult" + "pythonPackageSpec": { + "$ref": "GoogleCloudAiplatformV1PythonPackageSpec", + "description": "The Python packaged task." }, - "similarityTakedownResult": { - "$ref": "LearningGenaiRootSimilarityTakedownResult" + "replicaCount": { + "description": "Optional. The number of worker replicas to use for this worker pool.", + "format": "int64", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootThresholdType": { - "description": "The type of score that bundled with a threshold, and will not be attending the final score calculation. How each score type uses the threshold can be implementation details.", - "id": "LearningGenaiRootThresholdType", + "GoogleCloudAiplatformV1WriteFeatureValuesPayload": { + "description": "Contains Feature values to be written for a specific entity.", + "id": "GoogleCloudAiplatformV1WriteFeatureValuesPayload", "properties": { - "scoreType": { - "enum": [ - "TYPE_UNKNOWN", - "TYPE_SAFE", - "TYPE_POLICY", - "TYPE_GENERATION" - ], - "enumDescriptions": [ - "Unknown scorer type.", - "Safety scorer.", - "Policy scorer.", - "Generation scorer." - ], + "entityId": { + "description": "Required. The ID of the entity.", "type": "string" }, - "threshold": { - "format": "double", - "type": "number" + "featureValues": { + "additionalProperties": { + "$ref": "GoogleCloudAiplatformV1FeatureValue" + }, + "description": "Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. 
The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future.", + "type": "object" } }, "type": "object" }, - "LearningGenaiRootTokensAndLogProbPerDecodingStep": { - "description": "Results of RandomSamplingParams::top_k_logprob_per_decoding_step.", - "id": "LearningGenaiRootTokensAndLogProbPerDecodingStep", + "GoogleCloudAiplatformV1WriteFeatureValuesRequest": { + "description": "Request message for FeaturestoreOnlineServingService.WriteFeatureValues.", + "id": "GoogleCloudAiplatformV1WriteFeatureValuesRequest", "properties": { - "chosenCandidates": { - "description": "Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.", - "items": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate" - }, - "type": "array" - }, - "topCandidates": { - "description": "Length = total number of decoding steps.", + "payloads": { + "description": "Required. The entities to be written. 
Up to 100,000 feature values can be written across all `payloads`.", "items": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates" + "$ref": "GoogleCloudAiplatformV1WriteFeatureValuesPayload" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate": { - "description": "A candidate at a decoding step.", - "id": "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate", - "properties": { - "logProbability": { - "description": "The candidate's log probability.", - "format": "float", - "type": "number" - }, - "token": { - "description": "The candidate’s token value.", - "type": "string" - } - }, + "GoogleCloudAiplatformV1WriteFeatureValuesResponse": { + "description": "Response message for FeaturestoreOnlineServingService.WriteFeatureValues.", + "id": "GoogleCloudAiplatformV1WriteFeatureValuesResponse", + "properties": {}, "type": "object" }, - "LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates": { - "description": "Candidates with top log probabilities at each decoding step.", - "id": "LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates", + "GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest": { + "description": "Request message for TensorboardService.WriteTensorboardExperimentData.", + "id": "GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest", "properties": { - "candidates": { - "description": "Sorted by log probability in descending order.", + "writeRunDataRequests": { + "description": "Required. 
Requests containing per-run TensorboardTimeSeries data to write.", "items": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate" + "$ref": "GoogleCloudAiplatformV1WriteTensorboardRunDataRequest" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootToxicityResult": { - "description": "A model can generate multiple signals and this captures all the generated signals for a single message.", - "id": "LearningGenaiRootToxicityResult", + "GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse": { + "description": "Response message for TensorboardService.WriteTensorboardExperimentData.", + "id": "GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1WriteTensorboardRunDataRequest": { + "description": "Request message for TensorboardService.WriteTensorboardRunData.", + "id": "GoogleCloudAiplatformV1WriteTensorboardRunDataRequest", "properties": { - "signals": { + "tensorboardRun": { + "description": "Required. The resource name of the TensorboardRun to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`", + "type": "string" + }, + "timeSeriesData": { + "description": "Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. 
The upper limit of data points per write request is 5000.", "items": { - "$ref": "LearningGenaiRootToxicitySignal" + "$ref": "GoogleCloudAiplatformV1TimeSeriesData" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootToxicitySignal": { - "description": "Proto to capture a signal generated by the toxicity model.", - "id": "LearningGenaiRootToxicitySignal", + "GoogleCloudAiplatformV1WriteTensorboardRunDataResponse": { + "description": "Response message for TensorboardService.WriteTensorboardRunData.", + "id": "GoogleCloudAiplatformV1WriteTensorboardRunDataResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1XraiAttribution": { + "description": "An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models.", + "id": "GoogleCloudAiplatformV1XraiAttribution", "properties": { - "allowed": { - "type": "boolean" + "blurBaselineConfig": { + "$ref": "GoogleCloudAiplatformV1BlurBaselineConfig", + "description": "Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383" }, - "label": { - "enum": [ - "LABEL_UNSPECIFIED", - "NOT_SENSITIVE", - "SENSITIVE", - "ACCIDENTS_DISASTERS", - "ADULT", - "COMPUTER_SECURITY", - "CONTROVERSIAL_SOCIAL_ISSUES", - "DEATH_TRAGEDY", - "DRUGS", - "IDENTITY_ETHNICITY", - "FINANCIAL_HARDSHIP", - "FIREARMS_WEAPONS", - "HEALTH", - "INSULT", - "LEGAL", - "MENTAL_HEALTH", - "POLITICS", - "RELIGION_BELIEFS", - "SAFETY", - "SELF_HARM", - "SPECIAL_NEEDS", - "TERRORISM", - "TOXIC", - "TROUBLED_RELATIONSHIP", - "VIOLENCE_ABUSE", - "VULGAR", - "WAR_CONFLICT" - ], - "enumDescriptions": [ - "Default label.", - "Input is not sensitive.", - "Input is sensitive.", - "Input is related to accidents or disasters.", - "Input contains adult content.", - "Input is related to computer security.", - "Input contains controversial social issues.", - "Input is related to death tragedy.", - "Input is related to drugs.", - "Input is related to identity or ethnicity.", - "Input is related to financial hardship.", - "Input is related to firearms or weapons.", - "Input contains health related information.", - "Input may be an insult.", - "Input is related to legal content.", - "Input contains mental health related information.", - "Input is related to politics.", - "Input is related to religions or beliefs.", - "Input is related to safety.", - "Input is related to self-harm.", - "Input is related to special needs.", - "Input is related to terrorism.", - "Input is toxic.", - "Input is related to troubled relationships.", - "Input contains content about violence or abuse.", - "Input is vulgar.", - "Input is related to war and conflict." - ], - "type": "string" + "smoothGradConfig": { + "$ref": "GoogleCloudAiplatformV1SmoothGradConfig", + "description": "Config for SmoothGrad approximation of gradients. 
When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf" }, - "score": { - "format": "float", - "type": "number" + "stepCount": { + "description": "Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "LearningGenaiRootTranslationRequestInfo": { - "description": "Each TranslationRequestInfo corresponds to a request sent to the translation server.", - "id": "LearningGenaiRootTranslationRequestInfo", + "GoogleCloudLocationListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "id": "GoogleCloudLocationListLocationsResponse", "properties": { - "detectedLanguageCodes": { - "description": "The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. 
If the source language was passed, auto-detection of the language does not occur and this field is empty.", + "locations": { + "description": "A list of locations that matches the specified filter in the request.", "items": { - "type": "string" + "$ref": "GoogleCloudLocationLocation" }, "type": "array" }, - "totalContentSize": { - "description": "The sum of the size of all the contents in the request.", - "format": "int64", + "nextPageToken": { + "description": "The standard List next-page token.", "type": "string" } }, "type": "object" }, - "LearningServingLlmAtlasOutputMetadata": { - "id": "LearningServingLlmAtlasOutputMetadata", + "GoogleCloudLocationLocation": { + "description": "A resource that represents a Google Cloud location.", + "id": "GoogleCloudLocationLocation", "properties": { - "requestTopic": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", "type": "string" }, - "source": { - "enum": [ - "UNKNOWN", - "FACTUALITY", - "INFOBOT", - "LLM" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", + "type": "object" + }, + "locationId": { + "description": "The canonical id for this location. For example: `\"us-east1\"`.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. For example the available capacity at the given location.", + "type": "object" + }, + "name": { + "description": "Resource name for the location, which may vary between implementations. 
For example: `\"projects/example-project/locations/us-east1\"`", "type": "string" } }, "type": "object" }, - "LearningServingLlmMessageMetadata": { - "description": "LINT.IfChange This metadata contains additional information required for debugging.", - "id": "LearningServingLlmMessageMetadata", + "GoogleIamV1Binding": { + "description": "Associates `members`, or principals, with a `role`.", + "id": "GoogleIamV1Binding", "properties": { - "atlasMetadata": { - "$ref": "LearningServingLlmAtlasOutputMetadata" - }, - "classifierSummary": { - "$ref": "LearningGenaiRootClassifierOutputSummary", - "description": "Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not." - }, - "codeyOutput": { - "$ref": "LearningGenaiRootCodeyOutput", - "description": "Contains metadata related to Codey Processors." - }, - "currentStreamTextLength": { - "format": "uint32", - "type": "integer" - }, - "deleted": { - "description": "Whether the corresponding message has been deleted.", - "type": "boolean" + "condition": { + "$ref": "GoogleTypeExpr", + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, - "filterMeta": { - "description": "Metadata for filters that triggered.", + "members": { + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. 
* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. 
* `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.", "items": { - "$ref": "LearningGenaiRootFilterMetadata" + "type": "string" }, "type": "array" }, - "finalMessageScore": { - "$ref": "LearningGenaiRootScore", - "description": "This score is finally used for ranking the message. 
This will be same as the score present in `Message.score` field." - }, - "finishReason": { - "description": "NOT YET IMPLEMENTED.", - "enum": [ - "UNSPECIFIED", - "RETURN", - "STOP", - "MAX_TOKENS", - "FILTER", - "TOP_N_FILTERED" - ], - "enumDescriptions": [ - "", - "Return all the tokens back. This typically implies no filtering or stop sequence was triggered.", - "Finished due to provided stop sequence.", - "Model has emitted the maximum number of tokens as specified by max_decoding_steps.", - "Finished due to triggering some post-processing filter.", - "Filtered out due to Top_N < Response_Candidates.Size()" - ], - "type": "string" - }, - "groundingMetadata": { - "$ref": "LearningGenaiRootGroundingMetadata" - }, - "isCode": { - "description": "Applies to streaming response message only. Whether the message is a code.", - "type": "boolean" - }, - "isFallback": { - "description": "Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty.", - "type": "boolean" - }, - "langidResult": { - "$ref": "NlpSaftLangIdResult", - "description": "Result from nlp_saft DetectLanguage method. Currently the predicted language code and language probability is used." - }, - "language": { - "description": "Detected language.", - "type": "string" - }, - "lmPrefix": { - "description": "The LM prefix used to generate this response.", - "type": "string" - }, - "lmrootInternalRequestMetrics": { - "$ref": "LearningGenaiRootRequestMetrics", - "description": "FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames." - }, - "mmRecitationResult": { - "$ref": "LearningGenaiRecitationMMRecitationCheckResult", - "description": "Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked." 
- }, - "numRewinds": { - "description": "Number of Controlled Decoding rewind and repeats that have happened for this response.", - "format": "uint32", - "type": "integer" - }, - "originalText": { - "description": "The original text generated by LLM. This is the raw output for debugging purposes.", + "role": { + "description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).", "type": "string" - }, - "perStreamDecodedTokenCount": { - "description": "Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only.", - "format": "int32", - "type": "integer" - }, - "perStreamReturnedTokenCount": { - "description": "Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only.", - "format": "int32", - "type": "integer" - }, - "raiOutputs": { - "description": "Results of running RAI on the query or this response candidate. One output per rai_config. It will be populated regardless of whether the threshold is exceeded or not.", - "items": { - "$ref": "LearningGenaiRootRAIOutput" - }, - "type": "array" - }, - "recitationResult": { - "$ref": "LearningGenaiRecitationRecitationResult", - "description": "Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome." 
- }, - "scores": { - "description": "All the different scores for a message are logged here.", + } + }, + "type": "object" + }, + "GoogleIamV1Policy": { + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", + "id": "GoogleIamV1Policy", + "properties": { + "bindings": { + "description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. 
For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.", "items": { - "$ref": "LearningGenaiRootScore" + "$ref": "GoogleIamV1Binding" }, "type": "array" }, - "streamTerminated": { - "description": "Whether the response is terminated during streaming return. Only used for streaming requests.", - "type": "boolean" - }, - "totalDecodedTokenCount": { - "description": "Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate.", - "format": "int32", - "type": "integer" + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", + "format": "byte", + "type": "string" }, - "totalReturnedTokenCount": { - "description": "Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only.", + "version": { + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. 
Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" - }, - "translatedUserPrompts": { - "description": "Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. It will be empty if non of the signals requested need translation.", + } + }, + "type": "object" + }, + "GoogleIamV1SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "id": "GoogleIamV1SetIamPolicyRequest", + "properties": { + "policy": { + "$ref": "GoogleIamV1Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." 
+ } + }, + "type": "object" + }, + "GoogleIamV1TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "id": "GoogleIamV1TestIamPermissionsResponse", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, "type": "array" - }, - "vertexRaiResult": { - "$ref": "CloudAiNlLlmProtoServiceRaiResult", - "description": "The metadata from Vertex SafetyCat processors" } }, "type": "object" }, - "NlpSaftLangIdLocalesResult": { - "id": "NlpSaftLangIdLocalesResult", + "GoogleLongrunningListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "id": "GoogleLongrunningListOperationsResponse", "properties": { - "predictions": { - "description": "List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be [\"pt-BR\", \"pt-PT\"], in that order. May be empty, indicating that the model did not predict any acceptable locales.", + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", "items": { - "$ref": "NlpSaftLangIdLocalesResultLocale" + "$ref": "GoogleLongrunningOperation" }, "type": "array" } }, "type": "object" }, - "NlpSaftLangIdLocalesResultLocale": { - "id": "NlpSaftLangIdLocalesResultLocale", + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "GoogleLongrunningOperation", "properties": { - "languageCode": { - "description": "A BCP 47 language code that includes region information. 
For example, \"pt-BR\" or \"pt-PT\". This field will always be populated.", + "done": { + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", + "type": "boolean" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", + "type": "object" } }, "type": "object" }, - "NlpSaftLangIdResult": { - "id": "NlpSaftLangIdResult", + "GoogleProtobufEmpty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", + "id": "GoogleProtobufEmpty", + "properties": {}, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "GoogleRpcStatus", "properties": { - "modelVersion": { - "description": "The version of the model used to create these annotations.", - "enum": [ - "VERSION_UNSPECIFIED", - "INDEXING_20181017", - "INDEXING_20191206", - "INDEXING_20200313", - "INDEXING_20210618", - "STANDARD_20220516" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" }, - "predictions": { - "description": "This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability.", + "details": { + "description": "A list of messages that carry the error details. 
There is a common set of message types for APIs to use.", "items": { - "$ref": "NlpSaftLanguageSpan" + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" }, "type": "array" }, - "spanPredictions": { - "description": "This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty.", - "items": { - "$ref": "NlpSaftLanguageSpanSequence" - }, - "type": "array" + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" } }, "type": "object" }, - "NlpSaftLanguageSpan": { - "id": "NlpSaftLanguageSpan", + "GoogleTypeColor": { + "description": "Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. 
When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... 
var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ...", + "id": "GoogleTypeColor", "properties": { - "end": { - "format": "int32", - "type": "integer" + "alpha": { + "description": "The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. 
If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0).", + "format": "float", + "type": "number" }, - "languageCode": { - "description": "A BCP 47 language code for this span.", - "type": "string" + "blue": { + "description": "The amount of blue in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" }, - "locales": { - "$ref": "NlpSaftLangIdLocalesResult", - "description": "Optional field containing any information that was predicted about the specific locale(s) of the span." + "green": { + "description": "The amount of green in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" }, - "probability": { - "description": "A probability associated with this prediction.", + "red": { + "description": "The amount of red in the color as a value in the interval [0, 1].", "format": "float", "type": "number" + } + }, + "type": "object" + }, + "GoogleTypeDate": { + "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp", + "id": "GoogleTypeDate", + "properties": { + "day": { + "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.", + "format": "int32", + "type": "integer" }, - "start": { - "description": "Start and end byte offsets, inclusive, within the given input string. 
A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. If both are unset then this LanguageSpan applies to the entire input.", + "month": { + "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.", + "format": "int32", + "type": "integer" + }, + "year": { + "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.", "format": "int32", "type": "integer" } }, "type": "object" }, - "NlpSaftLanguageSpanSequence": { - "id": "NlpSaftLanguageSpanSequence", + "GoogleTypeExpr": { + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. 
See the service documentation for additional information.", + "id": "GoogleTypeExpr", "properties": { - "languageSpans": { - "description": "A sequence of LanguageSpan objects, each assigning a language to a subspan of the input.", - "items": { - "$ref": "NlpSaftLanguageSpan" - }, - "type": "array" + "description": { + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "type": "string" }, - "probability": { - "description": "The probability of this sequence of LanguageSpans.", - "format": "float", - "type": "number" + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax.", + "type": "string" + }, + "location": { + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" } }, "type": "object" }, - "Proto2BridgeMessageSet": { - "description": "This is proto2's version of MessageSet.", - "id": "Proto2BridgeMessageSet", - "properties": {}, + "GoogleTypeInterval": { + "description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", + "id": "GoogleTypeInterval", + "properties": { + "endTime": { + "description": "Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "Optional. Inclusive start of the interval. 
If specified, a Timestamp matching this interval will have to be the same or after the start.", + "format": "google-datetime", + "type": "string" + } + }, "type": "object" }, - "UtilStatusProto": { - "description": "Wire-format for a Status object", - "id": "UtilStatusProto", + "GoogleTypeMoney": { + "description": "Represents an amount of money with its currency type.", + "id": "GoogleTypeMoney", "properties": { - "canonicalCode": { - "description": "The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be.", - "format": "int32", - "type": "integer" + "currencyCode": { + "description": "The three-letter currency code defined in ISO 4217.", + "type": "string" }, - "code": { - "description": "Numeric code drawn from the space specified below. Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto", + "nanos": { + "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", "format": "int32", "type": "integer" }, - "message": { - "description": "Detail message", - "type": "string" - }, - "messageSet": { - "$ref": "Proto2BridgeMessageSet", - "description": "message_set associates an arbitrary proto message with the status." - }, - "space": { - "description": "The following are usually only present when code != 0 Space to which this status belongs", + "units": { + "description": "The whole units of the amount. 
For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", + "format": "int64", "type": "string" } }, diff --git a/discovery/aiplatform-v1beta1.json b/discovery/aiplatform-v1beta1.json index bcf212f484..e4fd2f0dbc 100644 --- a/discovery/aiplatform-v1beta1.json +++ b/discovery/aiplatform-v1beta1.json @@ -20,8 +20,8 @@ "documentationLink": "https://cloud.google.com/vertex-ai/", "endpoints": [ { - "description": "Locational Endpoint", "location": "africa-south1", + "description": "Locational Endpoint", "endpointUrl": "https://africa-south1-aiplatform.googleapis.com/" }, { @@ -30,9 +30,9 @@ "endpointUrl": "https://asia-east1-aiplatform.googleapis.com/" }, { - "endpointUrl": "https://asia-east2-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "location": "asia-east2" + "location": "asia-east2", + "endpointUrl": "https://asia-east2-aiplatform.googleapis.com/" }, { "description": "Locational Endpoint", @@ -40,14 +40,14 @@ "location": "asia-northeast1" }, { - "description": "Locational Endpoint", "endpointUrl": "https://asia-northeast2-aiplatform.googleapis.com/", + "description": "Locational Endpoint", "location": "asia-northeast2" }, { + "location": "asia-northeast3", "description": "Locational Endpoint", - "endpointUrl": "https://asia-northeast3-aiplatform.googleapis.com/", - "location": "asia-northeast3" + "endpointUrl": "https://asia-northeast3-aiplatform.googleapis.com/" }, { "endpointUrl": "https://asia-south1-aiplatform.googleapis.com/", @@ -55,19 +55,19 @@ "description": "Locational Endpoint" }, { - "description": "Locational Endpoint", + "location": "asia-southeast1", "endpointUrl": "https://asia-southeast1-aiplatform.googleapis.com/", - "location": "asia-southeast1" + "description": "Locational Endpoint" }, { - "location": "asia-southeast2", "endpointUrl": "https://asia-southeast2-aiplatform.googleapis.com/", + "location": "asia-southeast2", "description": "Locational Endpoint" }, { - "endpointUrl": 
"https://australia-southeast1-aiplatform.googleapis.com/", + "description": "Locational Endpoint", "location": "australia-southeast1", - "description": "Locational Endpoint" + "endpointUrl": "https://australia-southeast1-aiplatform.googleapis.com/" }, { "description": "Locational Endpoint", @@ -75,54 +75,54 @@ "endpointUrl": "https://australia-southeast2-aiplatform.googleapis.com/" }, { - "location": "europe-central2", + "description": "Locational Endpoint", "endpointUrl": "https://europe-central2-aiplatform.googleapis.com/", - "description": "Locational Endpoint" + "location": "europe-central2" }, { - "description": "Locational Endpoint", "endpointUrl": "https://europe-north1-aiplatform.googleapis.com/", + "description": "Locational Endpoint", "location": "europe-north1" }, { + "endpointUrl": "https://europe-southwest1-aiplatform.googleapis.com/", "location": "europe-southwest1", - "description": "Locational Endpoint", - "endpointUrl": "https://europe-southwest1-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { + "endpointUrl": "https://europe-west1-aiplatform.googleapis.com/", "location": "europe-west1", - "description": "Locational Endpoint", - "endpointUrl": "https://europe-west1-aiplatform.googleapis.com/" + "description": "Locational Endpoint" }, { - "description": "Locational Endpoint", "endpointUrl": "https://europe-west2-aiplatform.googleapis.com/", + "description": "Locational Endpoint", "location": "europe-west2" }, { - "location": "europe-west3", + "endpointUrl": "https://europe-west3-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "endpointUrl": "https://europe-west3-aiplatform.googleapis.com/" + "location": "europe-west3" }, { - "endpointUrl": "https://europe-west4-aiplatform.googleapis.com/", "location": "europe-west4", - "description": "Locational Endpoint" + "description": "Locational Endpoint", + "endpointUrl": "https://europe-west4-aiplatform.googleapis.com/" }, { + "location": "europe-west6", 
"description": "Locational Endpoint", - "endpointUrl": "https://europe-west6-aiplatform.googleapis.com/", - "location": "europe-west6" + "endpointUrl": "https://europe-west6-aiplatform.googleapis.com/" }, { + "location": "europe-west8", "endpointUrl": "https://europe-west8-aiplatform.googleapis.com/", - "description": "Locational Endpoint", - "location": "europe-west8" + "description": "Locational Endpoint" }, { - "location": "europe-west9", + "description": "Locational Endpoint", "endpointUrl": "https://europe-west9-aiplatform.googleapis.com/", - "description": "Locational Endpoint" + "location": "europe-west9" }, { "description": "Locational Endpoint", @@ -131,13 +131,13 @@ }, { "endpointUrl": "https://me-central1-aiplatform.googleapis.com/", - "description": "Locational Endpoint", - "location": "me-central1" + "location": "me-central1", + "description": "Locational Endpoint" }, { - "location": "me-central2", + "endpointUrl": "https://me-central2-aiplatform.googleapis.com/", "description": "Locational Endpoint", - "endpointUrl": "https://me-central2-aiplatform.googleapis.com/" + "location": "me-central2" }, { "endpointUrl": "https://me-west1-aiplatform.googleapis.com/", @@ -155,34 +155,34 @@ "endpointUrl": "https://northamerica-northeast2-aiplatform.googleapis.com/" }, { + "location": "southamerica-east1", "description": "Locational Endpoint", - "endpointUrl": "https://southamerica-east1-aiplatform.googleapis.com/", - "location": "southamerica-east1" + "endpointUrl": "https://southamerica-east1-aiplatform.googleapis.com/" }, { - "endpointUrl": "https://southamerica-west1-aiplatform.googleapis.com/", "description": "Locational Endpoint", + "endpointUrl": "https://southamerica-west1-aiplatform.googleapis.com/", "location": "southamerica-west1" }, { - "description": "Locational Endpoint", + "endpointUrl": "https://us-central1-aiplatform.googleapis.com/", "location": "us-central1", - "endpointUrl": "https://us-central1-aiplatform.googleapis.com/" + "description": 
"Locational Endpoint" }, { - "location": "us-central2", "description": "Locational Endpoint", + "location": "us-central2", "endpointUrl": "https://us-central2-aiplatform.googleapis.com/" }, { - "location": "us-east1", "endpointUrl": "https://us-east1-aiplatform.googleapis.com/", + "location": "us-east1", "description": "Locational Endpoint" }, { "endpointUrl": "https://us-east4-aiplatform.googleapis.com/", - "location": "us-east4", - "description": "Locational Endpoint" + "description": "Locational Endpoint", + "location": "us-east4" }, { "location": "us-south1", @@ -190,14 +190,14 @@ "endpointUrl": "https://us-south1-aiplatform.googleapis.com/" }, { - "endpointUrl": "https://us-west1-aiplatform.googleapis.com/", "location": "us-west1", + "endpointUrl": "https://us-west1-aiplatform.googleapis.com/", "description": "Locational Endpoint" }, { + "description": "Locational Endpoint", "endpointUrl": "https://us-west2-aiplatform.googleapis.com/", - "location": "us-west2", - "description": "Locational Endpoint" + "location": "us-west2" }, { "endpointUrl": "https://us-west3-aiplatform.googleapis.com/", @@ -205,9 +205,9 @@ "description": "Locational Endpoint" }, { + "location": "us-west4", "endpointUrl": "https://us-west4-aiplatform.googleapis.com/", - "description": "Locational Endpoint", - "location": "us-west4" + "description": "Locational Endpoint" }, { "description": "Locational Endpoint", @@ -19595,23 +19595,9 @@ } } }, - "revision": "20240507", + "revision": "20240510", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { - "CloudAiLargeModelsVisionEmbedVideoResponse": { - "description": "Video embedding response.", - "id": "CloudAiLargeModelsVisionEmbedVideoResponse", - "properties": { - "videoEmbeddings": { - "description": "The embedding vector for the video.", - "items": { - "type": "any" - }, - "type": "array" - } - }, - "type": "object" - }, "CloudAiLargeModelsVisionFilteredText": { "description": "Details for filtered input text.", "id": 
"CloudAiLargeModelsVisionFilteredText", @@ -19821,17 +19807,6 @@ }, "type": "object" }, - "CloudAiLargeModelsVisionMediaGenerateContentResponse": { - "description": "Generate media content response", - "id": "CloudAiLargeModelsVisionMediaGenerateContentResponse", - "properties": { - "response": { - "$ref": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse", - "description": "Response to the user's request." - } - }, - "type": "object" - }, "CloudAiLargeModelsVisionNamedBoundingBox": { "id": "CloudAiLargeModelsVisionNamedBoundingBox", "properties": { @@ -19894,52 +19869,6 @@ }, "type": "object" }, - "CloudAiLargeModelsVisionReasonVideoResponse": { - "description": "Video reasoning response.", - "id": "CloudAiLargeModelsVisionReasonVideoResponse", - "properties": { - "responses": { - "description": "Generated text responses. The generated responses for different segments within the same video.", - "items": { - "$ref": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse" - }, - "type": "array" - } - }, - "type": "object" - }, - "CloudAiLargeModelsVisionReasonVideoResponseTextResponse": { - "description": "Contains text that is the response of the video captioning.", - "id": "CloudAiLargeModelsVisionReasonVideoResponseTextResponse", - "properties": { - "relativeTemporalPartition": { - "$ref": "CloudAiLargeModelsVisionRelativeTemporalPartition", - "description": "Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video." - }, - "text": { - "description": "Text information", - "type": "string" - } - }, - "type": "object" - }, - "CloudAiLargeModelsVisionRelativeTemporalPartition": { - "description": "For ease of use, assume that the start_offset is inclusive and the end_offset is exclusive. 
In mathematical terms, the partition would be written as [start_offset, end_offset).", - "id": "CloudAiLargeModelsVisionRelativeTemporalPartition", - "properties": { - "endOffset": { - "description": "End time offset of the partition.", - "format": "google-duration", - "type": "string" - }, - "startOffset": { - "description": "Start time offset of the partition.", - "format": "google-duration", - "type": "string" - } - }, - "type": "object" - }, "CloudAiLargeModelsVisionSemanticFilterResponse": { "id": "CloudAiLargeModelsVisionSemanticFilterResponse", "properties": { @@ -19973,1464 +19902,703 @@ }, "type": "object" }, - "CloudAiNlLlmProtoServiceCandidate": { - "id": "CloudAiNlLlmProtoServiceCandidate", + "GoogleApiHttpBody": { + "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. 
google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", + "id": "GoogleApiHttpBody", "properties": { - "citationMetadata": { - "$ref": "CloudAiNlLlmProtoServiceCitationMetadata", - "description": "Source attribution of the generated content." - }, - "content": { - "$ref": "CloudAiNlLlmProtoServiceContent", - "description": "Content of the candidate." - }, - "finishMessage": { - "description": "A string that describes the filtering behavior in more detail. Only filled when reason is set.", + "contentType": { + "description": "The HTTP Content-Type header value specifying the content type of the body.", "type": "string" }, - "finishReason": { - "description": "The reason why the model stopped generating tokens.", - "enum": [ - "FINISH_REASON_UNSPECIFIED", - "FINISH_REASON_STOP", - "FINISH_REASON_MAX_TOKENS", - "FINISH_REASON_SAFETY", - "FINISH_REASON_RECITATION", - "FINISH_REASON_OTHER", - "FINISH_REASON_BLOCKLIST", - "FINISH_REASON_PROHIBITED_CONTENT", - "FINISH_REASON_SPII" - ], - "enumDescriptions": [ - "The finish reason is unspecified.", - "Natural stop point of the model or provided stop sequence.", - "The maximum number of tokens as specified in the request was reached.", - "The token generation was stopped as the response was flagged for safety reasons. 
NOTE: When streaming the Candidate.content will be empty if content filters blocked the output.", - "The token generation was stopped as the response was flagged for unauthorized citations.", - "All other reasons that stopped the token generation (currently only language filter).", - "The token generation was stopped as the response was flagged for the terms which are included from the terminology blocklist.", - "The token generation was stopped as the response was flagged for the prohibited contents (currently only CSAM).", - "The token generation was stopped as the response was flagged for Sensitive Personally Identifiable Information (SPII) contents." - ], + "data": { + "description": "The HTTP request/response body as raw binary.", + "format": "byte", "type": "string" }, - "groundingMetadata": { - "$ref": "LearningGenaiRootGroundingMetadata", - "description": "Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice." - }, - "index": { - "description": "Index of the candidate.", - "format": "int32", - "type": "integer" - }, - "safetyRatings": { - "description": "Safety ratings of the generated content.", + "extensions": { + "description": "Application specific response metadata. Must be set in the first response for streaming APIs.", "items": { - "$ref": "CloudAiNlLlmProtoServiceSafetyRating" + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" }, "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceCitation": { - "description": "Source attributions for content.", - "id": "CloudAiNlLlmProtoServiceCitation", + "GoogleCloudAiplatformV1beta1ActiveLearningConfig": { + "description": "Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. 
For every iteration, it will select a batch of data based on the sampling strategy.", + "id": "GoogleCloudAiplatformV1beta1ActiveLearningConfig", "properties": { - "endIndex": { - "description": "End index into the content.", - "format": "int32", - "type": "integer" - }, - "license": { - "description": "License of the attribution.", + "maxDataItemCount": { + "description": "Max number of human labeled DataItems.", + "format": "int64", "type": "string" }, - "publicationDate": { - "$ref": "GoogleTypeDate", - "description": "Publication date of the attribution." - }, - "startIndex": { - "description": "Start index into the content.", + "maxDataItemPercentage": { + "description": "Max percent of total DataItems for human labeling.", "format": "int32", "type": "integer" }, - "title": { - "description": "Title of the attribution.", - "type": "string" + "sampleConfig": { + "$ref": "GoogleCloudAiplatformV1beta1SampleConfig", + "description": "Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy." }, - "uri": { - "description": "Url reference of the attribution.", - "type": "string" + "trainingConfig": { + "$ref": "GoogleCloudAiplatformV1beta1TrainingConfig", + "description": "CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems." 
} }, "type": "object" }, - "CloudAiNlLlmProtoServiceCitationMetadata": { - "description": "A collection of source attributions for a piece of content.", - "id": "CloudAiNlLlmProtoServiceCitationMetadata", + "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsRequest": { + "description": "Request message for MetadataService.AddContextArtifactsAndExecutions.", + "id": "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsRequest", "properties": { - "citations": { - "description": "List of citations.", + "artifacts": { + "description": "The resource names of the Artifacts to attribute to the Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`", + "items": { + "type": "string" + }, + "type": "array" + }, + "executions": { + "description": "The resource names of the Executions to associate with the Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`", "items": { - "$ref": "CloudAiNlLlmProtoServiceCitation" + "type": "string" }, "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceContent": { - "description": "The content of a single message from a participant.", - "id": "CloudAiNlLlmProtoServiceContent", + "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsResponse": { + "description": "Response message for MetadataService.AddContextArtifactsAndExecutions.", + "id": "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1AddContextChildrenRequest": { + "description": "Request message for MetadataService.AddContextChildren.", + "id": "GoogleCloudAiplatformV1beta1AddContextChildrenRequest", "properties": { - "isCached": { - "description": "If true, the content is from a cached content.", - "type": "boolean" - }, - "parts": { - "description": "The parts of the message.", + "childContexts": { + "description": "The resource 
names of the child Contexts.", "items": { - "$ref": "CloudAiNlLlmProtoServicePart" + "type": "string" }, "type": "array" - }, - "role": { - "description": "The role of the current conversation participant.", - "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceFact": { - "description": "A condense version of WorldFact (assistant/boq/lamda/factuality/proto/factuality.proto) to propagate the essential information about the fact used in factuality to the upstream caller.", - "id": "CloudAiNlLlmProtoServiceFact", - "properties": { - "query": { - "description": "Query that is used to retrieve this fact.", - "type": "string" - }, - "summary": { - "description": "If present, the summary/snippet of the fact.", - "type": "string" - }, - "title": { - "description": "If present, it refers to the title of this fact.", - "type": "string" - }, - "url": { - "description": "If present, this URL links to the webpage of the fact.", - "type": "string" - } - }, + "GoogleCloudAiplatformV1beta1AddContextChildrenResponse": { + "description": "Response message for MetadataService.AddContextChildren.", + "id": "GoogleCloudAiplatformV1beta1AddContextChildrenResponse", + "properties": {}, "type": "object" }, - "CloudAiNlLlmProtoServiceFunctionCall": { - "description": "Function call details.", - "id": "CloudAiNlLlmProtoServiceFunctionCall", + "GoogleCloudAiplatformV1beta1AddExecutionEventsRequest": { + "description": "Request message for MetadataService.AddExecutionEvents.", + "id": "GoogleCloudAiplatformV1beta1AddExecutionEventsRequest", "properties": { - "args": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" + "events": { + "description": "The Events to create and add.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1Event" }, - "description": "The function parameters and values in JSON format.", - "type": "object" - }, - "name": { - "description": "Required. 
The name of the function to call.", - "type": "string" + "type": "array" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceFunctionResponse": { - "description": "Function response details.", - "id": "CloudAiNlLlmProtoServiceFunctionResponse", + "GoogleCloudAiplatformV1beta1AddExecutionEventsResponse": { + "description": "Response message for MetadataService.AddExecutionEvents.", + "id": "GoogleCloudAiplatformV1beta1AddExecutionEventsResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1AddTrialMeasurementRequest": { + "description": "Request message for VizierService.AddTrialMeasurement.", + "id": "GoogleCloudAiplatformV1beta1AddTrialMeasurementRequest", "properties": { - "name": { - "description": "Required. The name of the function to call.", - "type": "string" - }, - "response": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - }, - "description": "Required. The function response in JSON object format.", - "type": "object" + "measurement": { + "$ref": "GoogleCloudAiplatformV1beta1Measurement", + "description": "Required. The measurement to be added to a Trial." } }, "type": "object" }, - "CloudAiNlLlmProtoServiceGenerateMultiModalResponse": { - "id": "CloudAiNlLlmProtoServiceGenerateMultiModalResponse", + "GoogleCloudAiplatformV1beta1Annotation": { + "description": "Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem.", + "id": "GoogleCloudAiplatformV1beta1Annotation", "properties": { - "candidates": { - "description": "Possible candidate responses to the conversation up until this point.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceCandidate" - }, - "type": "array" + "annotationSource": { + "$ref": "GoogleCloudAiplatformV1beta1UserActionReference", + "description": "Output only. The source of the Annotation.", + "readOnly": true + }, + "createTime": { + "description": "Output only. 
Timestamp when this Annotation was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "debugMetadata": { - "$ref": "CloudAiNlLlmProtoServiceMessageMetadata", - "description": "Debug information containing message metadata. Clients should not consume this field, and this is only populated for Flow Runner path." + "etag": { + "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "type": "string" }, - "facts": { - "description": "External facts retrieved for factuality/grounding.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceFact" + "labels": { + "additionalProperties": { + "type": "string" }, - "type": "array" + "description": "Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with \"aiplatform.googleapis.com/\" and are immutable. Following system labels exist for each Annotation: * \"aiplatform.googleapis.com/annotation_set_name\": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * \"aiplatform.googleapis.com/payload_schema\": output only, its value is the payload_schema's title.", + "type": "object" }, - "promptFeedback": { - "$ref": "CloudAiNlLlmProtoServicePromptFeedback", - "description": "Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations." + "name": { + "description": "Output only. 
Resource name of the Annotation.", + "readOnly": true, + "type": "string" }, - "reportingMetrics": { - "$ref": "IntelligenceCloudAutomlXpsReportingMetrics", - "description": "Billable prediction metrics." + "payload": { + "description": "Required. The schema of the payload can be found in payload_schema.", + "type": "any" }, - "usageMetadata": { - "$ref": "CloudAiNlLlmProtoServiceUsageMetadata", - "description": "Usage metadata about the response(s)." + "payloadSchemaUri": { + "description": "Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata.", + "type": "string" + }, + "updateTime": { + "description": "Output only. Timestamp when this Annotation was last updated.", + "format": "google-datetime", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceMessageMetadata": { - "id": "CloudAiNlLlmProtoServiceMessageMetadata", + "GoogleCloudAiplatformV1beta1AnnotationSpec": { + "description": "Identifies a concept with which DataItems may be annotated with.", + "id": "GoogleCloudAiplatformV1beta1AnnotationSpec", "properties": { - "factualityDebugMetadata": { - "$ref": "LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata", - "description": "Factuality-related debug metadata." + "createTime": { + "description": "Output only. Timestamp when this AnnotationSpec was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "inputFilterInfo": { - "$ref": "LearningServingLlmMessageMetadata", - "description": "Filter metadata of the input messages." + "displayName": { + "description": "Required. The user-defined name of the AnnotationSpec. 
The name can be up to 128 characters long and can consist of any UTF-8 characters.", + "type": "string" }, - "modelRoutingDecision": { - "$ref": "LearningGenaiRootRoutingDecision", - "description": "This score is generated by the router model to decide which model to use" + "etag": { + "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "type": "string" }, - "outputFilterInfo": { - "description": "Filter metadata of the output messages.", - "items": { - "$ref": "LearningServingLlmMessageMetadata" - }, - "type": "array" + "name": { + "description": "Output only. Resource name of the AnnotationSpec.", + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "Output only. Timestamp when AnnotationSpec was last updated.", + "format": "google-datetime", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePart": { - "description": "A single part of a message.", - "id": "CloudAiNlLlmProtoServicePart", + "GoogleCloudAiplatformV1beta1Artifact": { + "description": "Instance of a general artifact.", + "id": "GoogleCloudAiplatformV1beta1Artifact", "properties": { - "documentMetadata": { - "$ref": "CloudAiNlLlmProtoServicePartDocumentMetadata", - "description": "Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part." + "createTime": { + "description": "Output only. Timestamp when this Artifact was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" }, - "fileData": { - "$ref": "CloudAiNlLlmProtoServicePartFileData", - "description": "URI-based data." + "description": { + "description": "Description of the Artifact", + "type": "string" }, - "functionCall": { - "$ref": "CloudAiNlLlmProtoServiceFunctionCall", - "description": "Function call data." 
+ "displayName": { + "description": "User provided display name of the Artifact. May be up to 128 Unicode characters.", + "type": "string" }, - "functionResponse": { - "$ref": "CloudAiNlLlmProtoServiceFunctionResponse", - "description": "Function response data." + "etag": { + "description": "An eTag used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", + "type": "string" }, - "inlineData": { - "$ref": "CloudAiNlLlmProtoServicePartBlob", - "description": "Inline bytes data" + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded).", + "type": "object" }, - "lmRootMetadata": { - "$ref": "CloudAiNlLlmProtoServicePartLMRootMetadata", - "description": "Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields." + "metadata": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB.", + "type": "object" }, - "text": { - "description": "Text input.", + "name": { + "description": "Output only. The resource name of the Artifact.", + "readOnly": true, "type": "string" }, - "videoMetadata": { - "$ref": "CloudAiNlLlmProtoServicePartVideoMetadata", - "description": "Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data." 
- } - }, - "type": "object" - }, - "CloudAiNlLlmProtoServicePartBlob": { - "description": "Represents arbitrary blob data input.", - "id": "CloudAiNlLlmProtoServicePartBlob", - "properties": { - "data": { - "description": "Inline data.", - "format": "byte", + "schemaTitle": { + "description": "The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", "type": "string" }, - "mimeType": { - "description": "The mime type corresponding to this input.", + "schemaVersion": { + "description": "The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", "type": "string" }, - "originalFileData": { - "$ref": "CloudAiNlLlmProtoServicePartFileData", - "description": "Original file data where the blob comes from." - } - }, - "type": "object" - }, - "CloudAiNlLlmProtoServicePartDocumentMetadata": { - "description": "Metadata describes the original input document content.", - "id": "CloudAiNlLlmProtoServicePartDocumentMetadata", - "properties": { - "originalDocumentBlob": { - "$ref": "CloudAiNlLlmProtoServicePartBlob", - "description": "The original document blob." - }, - "pageNumber": { - "description": "The (1-indexed) page number of the image in the original document. The first page carries the original document content and mime type.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "CloudAiNlLlmProtoServicePartFileData": { - "description": "Represents file data.", - "id": "CloudAiNlLlmProtoServicePartFileData", - "properties": { - "fileUri": { - "description": "Inline data.", + "state": { + "description": "The state of this Artifact. 
This is a property of the Artifact, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions.", + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "LIVE" + ], + "enumDescriptions": [ + "Unspecified state for the Artifact.", + "A state used by systems like Vertex AI Pipelines to indicate that the underlying data item represented by this Artifact is being created.", + "A state indicating that the Artifact should exist, unless something external to the system deletes it." + ], "type": "string" }, - "mimeType": { - "description": "The mime type corresponding to this input.", + "updateTime": { + "description": "Output only. Timestamp when this Artifact was last updated.", + "format": "google-datetime", + "readOnly": true, "type": "string" - } - }, - "type": "object" - }, - "CloudAiNlLlmProtoServicePartLMRootMetadata": { - "description": "Metadata provides extra info for building the LM Root request.", - "id": "CloudAiNlLlmProtoServicePartLMRootMetadata", - "properties": { - "chunkId": { - "description": "Chunk id that will be used when mapping the part to the LM Root's chunk.", + }, + "uri": { + "description": "The uniform resource identifier of the artifact file. 
May be empty if there is no actual artifact file.", "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePartVideoMetadata": { - "description": "Metadata describes the input video content.", - "id": "CloudAiNlLlmProtoServicePartVideoMetadata", + "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeOperationMetadata": { + "description": "Metadata information for NotebookService.AssignNotebookRuntime.", + "id": "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeOperationMetadata", "properties": { - "endOffset": { - "description": "The end offset of the video.", - "format": "google-duration", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The operation generic information." }, - "startOffset": { - "description": "The start offset of the video.", - "format": "google-duration", + "progressMessage": { + "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServicePromptFeedback": { - "description": "Content filter results for a prompt sent in the request.", - "id": "CloudAiNlLlmProtoServicePromptFeedback", + "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeRequest": { + "description": "Request message for NotebookService.AssignNotebookRuntime.", + "id": "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeRequest", "properties": { - "blockReason": { - "description": "Blocked reason.", - "enum": [ - "BLOCKED_REASON_UNSPECIFIED", - "SAFETY", - "OTHER", - "BLOCKLIST", - "PROHIBITED_CONTENT" - ], - "enumDescriptions": [ - "Unspecified blocked reason.", - "Candidates blocked due to safety.", - "Candidates blocked due to other reason (currently only language filter).", - "Candidates blocked due to the terms which are included from the terminology blocklist.", - "Candidates blocked due to prohibited content (currently only CSAM)." 
- ], - "type": "string" + "notebookRuntime": { + "$ref": "GoogleCloudAiplatformV1beta1NotebookRuntime", + "description": "Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment." }, - "blockReasonMessage": { - "description": "A readable block reason message.", + "notebookRuntimeId": { + "description": "Optional. User specified ID for the notebook runtime.", "type": "string" }, - "safetyRatings": { - "description": "Safety ratings.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceSafetyRating" - }, - "type": "array" + "notebookRuntimeTemplate": { + "description": "Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one).", + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceRaiResult": { - "description": "The RAI results for a given text. Next ID: 12", - "id": "CloudAiNlLlmProtoServiceRaiResult", + "GoogleCloudAiplatformV1beta1Attribution": { + "description": "Attribution that explains a particular prediction output.", + "id": "GoogleCloudAiplatformV1beta1Attribution", "properties": { - "aidaRecitationResult": { - "$ref": "LanguageLabsAidaTrustRecitationProtoRecitationResult", - "description": "Recitation result from Aida recitation checker." - }, - "blocked": { - "deprecated": true, - "description": "Use `triggered_blocklist`.", - "type": "boolean" - }, - "errorCodes": { - "description": "The error codes indicate which RAI filters block the response.", - "items": { - "format": "int32", - "type": "integer" - }, - "type": "array" + "approximationError": { + "description": "Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. 
* For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.", + "format": "double", + "readOnly": true, + "type": "number" }, - "filtered": { - "description": "Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`.", - "type": "boolean" + "baselineOutputValue": { + "description": "Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.", + "format": "double", + "readOnly": true, + "type": "number" }, - "languageFilterResult": { - "$ref": "LearningGenaiRootLanguageFilterResult", - "description": "Language filter result from SAFT LangId." + "featureAttributions": { + "description": "Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. 
The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).", + "readOnly": true, + "type": "any" }, - "mmRecitationResult": { - "$ref": "LearningGenaiRecitationMMRecitationCheckResult", - "description": "Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked." + "instanceOutputValue": { + "description": "Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.", + "format": "double", + "readOnly": true, + "type": "number" }, - "raiSignals": { - "description": "The RAI signals for the text.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceRaiSignal" - }, - "type": "array" + "outputDisplayName": { + "description": "Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must have the same shape as the explained output, and can be located using output_index.", + "readOnly": true, + "type": "string" }, - "translationRequestInfos": { - "description": "Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server.", + "outputIndex": { + "description": "Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. 
If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.", "items": { - "$ref": "LearningGenaiRootTranslationRequestInfo" + "format": "int32", + "type": "integer" }, + "readOnly": true, "type": "array" }, - "triggeredBlocklist": { - "description": "Whether the text triggered the blocklist.", - "type": "boolean" - }, - "triggeredRecitation": { - "description": "Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result.", - "type": "boolean" - }, - "triggeredSafetyFilter": { - "description": "Whether the text triggered the safety filter. Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold.", - "type": "boolean" + "outputName": { + "description": "Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceRaiSignal": { - "description": "An RAI signal for a single category.", - "id": "CloudAiNlLlmProtoServiceRaiSignal", + "GoogleCloudAiplatformV1beta1AuthConfig": { + "description": "Auth configuration to run the extension.", + "id": "GoogleCloudAiplatformV1beta1AuthConfig", "properties": { - "confidence": { - "description": "The confidence level for the RAI category.", - "enum": [ - "CONFIDENCE_UNSPECIFIED", - "CONFIDENCE_NONE", - "CONFIDENCE_LOW", - "CONFIDENCE_MEDIUM", - "CONFIDENCE_HIGH" + "apiKeyConfig": { + "$ref": "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig", + "description": "Config for API key auth." 
+ }, + "authType": { + "description": "Type of auth scheme.", + "enum": [ + "AUTH_TYPE_UNSPECIFIED", + "NO_AUTH", + "API_KEY_AUTH", + "HTTP_BASIC_AUTH", + "GOOGLE_SERVICE_ACCOUNT_AUTH", + "OAUTH", + "OIDC_AUTH" ], "enumDescriptions": [ "", - "", - "", - "", - "" + "No Auth.", + "API Key Auth.", + "HTTP Basic Auth.", + "Google Service Account Auth.", + "OAuth auth.", + "OpenID Connect (OIDC) Auth." ], "type": "string" }, - "flagged": { - "description": "Whether the category is flagged as being present. Currently, this is set to true if score >= 0.5.", - "type": "boolean" + "googleServiceAccountConfig": { + "$ref": "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig", + "description": "Config for Google Service Account auth." }, - "influentialTerms": { - "description": "The influential terms that could potentially block the response.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm" - }, - "type": "array" + "httpBasicAuthConfig": { + "$ref": "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig", + "description": "Config for HTTP Basic auth." 
}, - "raiCategory": { - "description": "The RAI category.", - "enum": [ - "RAI_CATEGORY_UNSPECIFIED", - "TOXIC", - "SEXUALLY_EXPLICIT", - "HATE_SPEECH", - "VIOLENT", - "PROFANITY", - "HARASSMENT", - "DEATH_HARM_TRAGEDY", - "FIREARMS_WEAPONS", - "PUBLIC_SAFETY", - "HEALTH", - "RELIGIOUS_BELIEF", - "ILLICIT_DRUGS", - "WAR_CONFLICT", - "POLITICS", - "FINANCE", - "LEGAL", - "CSAI", - "FRINGE", - "THREAT", - "SEVERE_TOXICITY", - "TOXICITY", - "SEXUAL", - "INSULT", - "DEROGATORY", - "IDENTITY_ATTACK", - "VIOLENCE_ABUSE", - "OBSCENE", - "DRUGS", - "CSAM", - "SPII", - "DANGEROUS_CONTENT", - "DANGEROUS_CONTENT_SEVERITY", - "INSULT_SEVERITY", - "DEROGATORY_SEVERITY", - "SEXUAL_SEVERITY" - ], - "enumDescriptions": [ - "", - "SafetyCat categories.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "GRAIL categories that can't be exposed to end users.", - "", - "Unused categories.", - "", - "Old category names.", - "", - "", - "", - "", - "", - "", - "", - "CSAM V2", - "SPII", - "New SafetyCat v3 categories", - "", - "", - "", - "" - ], - "type": "string" + "oauthConfig": { + "$ref": "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig", + "description": "Config for user oauth." }, - "score": { - "description": "The score for the category, in the range [0.0, 1.0].", - "format": "float", - "type": "number" + "oidcConfig": { + "$ref": "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig", + "description": "Config for user OIDC auth." 
} }, "type": "object" }, - "CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm": { - "description": "The influential term that could potentially block the response.", - "id": "CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm", + "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig": { + "description": "Config for authentication with API key.", + "id": "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig", "properties": { - "beginOffset": { - "description": "The beginning offset of the influential term.", - "format": "int32", - "type": "integer" - }, - "confidence": { - "description": "The confidence score of the influential term.", - "format": "float", - "type": "number" + "apiKeySecret": { + "description": "Required. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", + "type": "string" }, - "source": { - "description": "The source of the influential term, prompt or response.", + "httpElementLocation": { + "description": "Required. The location of the API key.", "enum": [ - "SOURCE_UNSPECIFIED", - "PROMPT", - "RESPONSE" + "HTTP_IN_UNSPECIFIED", + "HTTP_IN_QUERY", + "HTTP_IN_HEADER", + "HTTP_IN_PATH", + "HTTP_IN_BODY", + "HTTP_IN_COOKIE" ], "enumDescriptions": [ - "Unspecified source.", - "The influential term comes from the prompt.", - "The influential term comes from the response." + "", + "Element is in the HTTP request query.", + "Element is in the HTTP request header.", + "Element is in the HTTP request path.", + "Element is in the HTTP request body.", + "Element is in the HTTP request cookie." ], "type": "string" }, - "term": { - "description": "The influential term.", + "name": { + "description": "Required. The parameter name of the API key. E.g. 
If the API request is \"https://example.com/act?api_key=\", \"api_key\" would be the parameter name.", "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceSafetyRating": { - "description": "Safety rating corresponding to the generated content.", - "id": "CloudAiNlLlmProtoServiceSafetyRating", + "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig": { + "description": "Config for Google Service Account Authentication.", + "id": "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig", "properties": { - "blocked": { - "description": "Indicates whether the content was filtered out because of this rating.", - "type": "boolean" - }, - "category": { - "description": "Harm category.", - "enum": [ - "HARM_CATEGORY_UNSPECIFIED", - "HARM_CATEGORY_HATE_SPEECH", - "HARM_CATEGORY_DANGEROUS_CONTENT", - "HARM_CATEGORY_HARASSMENT", - "HARM_CATEGORY_SEXUALLY_EXPLICIT" - ], - "enumDescriptions": [ - "The harm category is unspecified.", - "The harm category is hate speech.", - "The harm category is dengerous content.", - "The harm category is harassment.", - "The harm category is sexually explicit." - ], + "serviceAccount": { + "description": "Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. 
- If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.", "type": "string" - }, - "influentialTerms": { - "description": "The influential terms that could potentially block the response.", - "items": { - "$ref": "CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm" - }, - "type": "array" - }, - "probability": { - "description": "Harm probability levels in the content.", - "enum": [ - "HARM_PROBABILITY_UNSPECIFIED", - "NEGLIGIBLE", - "LOW", - "MEDIUM", - "HIGH" - ], - "enumDescriptions": [ - "Harm probability unspecified.", - "Negligible level of harm.", - "Low level of harm.", - "Medium level of harm.", - "High level of harm." - ], + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig": { + "description": "Config for HTTP Basic Authentication.", + "id": "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig", + "properties": { + "credentialSecret": { + "description": "Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", "type": "string" - }, - "probabilityScore": { - "description": "Harm probability score.", - "format": "float", - "type": "number" - }, - "severity": { - "description": "Harm severity levels in the content.", - "enum": [ - "HARM_SEVERITY_UNSPECIFIED", - "HARM_SEVERITY_NEGLIGIBLE", - "HARM_SEVERITY_LOW", - "HARM_SEVERITY_MEDIUM", - "HARM_SEVERITY_HIGH" - ], - "enumDescriptions": [ - "Harm severity unspecified.", - "Negligible level of harm severity.", - "Low level of harm severity.", - "Medium level of harm severity.", - "High level of harm severity." 
- ], + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig": { + "description": "Config for user oauth.", + "id": "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig", + "properties": { + "accessToken": { + "description": "Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", "type": "string" }, - "severityScore": { - "description": "Harm severity score.", - "format": "float", - "type": "number" + "serviceAccount": { + "description": "The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.", + "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm": { - "description": "The influential term that could potentially block the response.", - "id": "CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm", + "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig": { + "description": "Config for user OIDC auth.", + "id": "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig", "properties": { - "beginOffset": { - "description": "The beginning offset of the influential term.", - "format": "int32", - "type": "integer" - }, - "confidence": { - "description": "The confidence score of the influential term.", - "format": "float", - "type": "number" - }, - "source": { - "description": "The source of the influential term, prompt or response.", - "enum": [ - "SOURCE_UNSPECIFIED", - "PROMPT", - "RESPONSE" - ], - "enumDescriptions": [ - "Unspecified source.", - "The influential term comes from the prompt.", - "The influential term comes from the response." - ], + "idToken": { + "description": "OpenID Connect formatted ID token for extension endpoint. 
Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", "type": "string" }, - "term": { - "description": "The influential term.", + "serviceAccount": { + "description": "The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).", "type": "string" } }, "type": "object" }, - "CloudAiNlLlmProtoServiceUsageMetadata": { - "description": "Usage metadata about response(s).", - "id": "CloudAiNlLlmProtoServiceUsageMetadata", + "GoogleCloudAiplatformV1beta1AutomaticResources": { + "description": "A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines.", + "id": "GoogleCloudAiplatformV1beta1AutomaticResources", "properties": { - "candidatesTokenCount": { - "description": "Number of tokens in the response(s).", - "format": "int32", - "type": "integer" - }, - "promptTokenCount": { - "description": "Number of tokens in the request.", + "maxReplicaCount": { + "description": "Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). 
If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number.", "format": "int32", "type": "integer" }, - "totalTokenCount": { + "minReplicaCount": { + "description": "Immutable. The minimum number of replicas this DeployedModel will always be deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.", "format": "int32", "type": "integer" } }, "type": "object" }, - "GoogleApiHttpBody": { - "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. 
google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", - "id": "GoogleApiHttpBody", + "GoogleCloudAiplatformV1beta1AutoscalingMetricSpec": { + "description": "The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count.", + "id": "GoogleCloudAiplatformV1beta1AutoscalingMetricSpec", "properties": { - "contentType": { - "description": "The HTTP Content-Type header value specifying the content type of the body.", - "type": "string" - }, - "data": { - "description": "The HTTP request/response body as raw binary.", - "format": "byte", + "metricName": { + "description": "Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`", "type": "string" }, - "extensions": { - "description": "Application specific response metadata. Must be set in the first response for streaming APIs.", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "type": "array" + "target": { + "description": "The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. 
The default value is 60 (representing 60%) if not provided.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1ActiveLearningConfig": { - "description": "Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy.", - "id": "GoogleCloudAiplatformV1beta1ActiveLearningConfig", + "GoogleCloudAiplatformV1beta1AvroSource": { + "description": "The storage details for Avro input content.", + "id": "GoogleCloudAiplatformV1beta1AvroSource", "properties": { - "maxDataItemCount": { - "description": "Max number of human labeled DataItems.", - "format": "int64", - "type": "string" - }, - "maxDataItemPercentage": { - "description": "Max percent of total DataItems for human labeling.", - "format": "int32", - "type": "integer" - }, - "sampleConfig": { - "$ref": "GoogleCloudAiplatformV1beta1SampleConfig", - "description": "Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy." - }, - "trainingConfig": { - "$ref": "GoogleCloudAiplatformV1beta1TrainingConfig", - "description": "CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems." + "gcsSource": { + "$ref": "GoogleCloudAiplatformV1beta1GcsSource", + "description": "Required. Google Cloud Storage location." 
} }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsRequest": { - "description": "Request message for MetadataService.AddContextArtifactsAndExecutions.", - "id": "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsRequest", + "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsRequest": { + "description": "Request message for PipelineService.BatchCancelPipelineJobs.", + "id": "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsRequest", "properties": { - "artifacts": { - "description": "The resource names of the Artifacts to attribute to the Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`", + "names": { + "description": "Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", "items": { "type": "string" }, "type": "array" - }, - "executions": { - "description": "The resource names of the Executions to associate with the Context. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`", + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsResponse": { + "description": "Response message for PipelineService.BatchCancelPipelineJobs.", + "id": "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsResponse", + "properties": { + "pipelineJobs": { + "description": "PipelineJobs cancelled.", "items": { - "type": "string" + "$ref": "GoogleCloudAiplatformV1beta1PipelineJob" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsResponse": { - "description": "Response message for MetadataService.AddContextArtifactsAndExecutions.", - "id": "GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsResponse", - "properties": {}, + "GoogleCloudAiplatformV1beta1BatchCreateFeaturesOperationMetadata": { + "description": "Details of operations that perform batch create Features.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesOperationMetadata", + "properties": { + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for Feature." + } + }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddContextChildrenRequest": { - "description": "Request message for MetadataService.AddContextChildren.", - "id": "GoogleCloudAiplatformV1beta1AddContextChildrenRequest", + "GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest": { + "description": "Request message for FeaturestoreService.BatchCreateFeatures.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest", "properties": { - "childContexts": { - "description": "The resource names of the child Contexts.", + "requests": { + "description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. 
If `parent` is set in a child request, then the value must match the `parent` value in this request message.", "items": { - "type": "string" + "$ref": "GoogleCloudAiplatformV1beta1CreateFeatureRequest" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddContextChildrenResponse": { - "description": "Response message for MetadataService.AddContextChildren.", - "id": "GoogleCloudAiplatformV1beta1AddContextChildrenResponse", - "properties": {}, + "GoogleCloudAiplatformV1beta1BatchCreateFeaturesResponse": { + "description": "Response message for FeaturestoreService.BatchCreateFeatures.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesResponse", + "properties": { + "features": { + "description": "The Features created.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1Feature" + }, + "type": "array" + } + }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddExecutionEventsRequest": { - "description": "Request message for MetadataService.AddExecutionEvents.", - "id": "GoogleCloudAiplatformV1beta1AddExecutionEventsRequest", + "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsRequest": { + "description": "Request message for TensorboardService.BatchCreateTensorboardRuns.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsRequest", "properties": { - "events": { - "description": "The Events to create and add.", + "requests": { + "description": "Required. The request message specifying the TensorboardRuns to create. 
A maximum of 1000 TensorboardRuns can be created in a batch.", "items": { - "$ref": "GoogleCloudAiplatformV1beta1Event" + "$ref": "GoogleCloudAiplatformV1beta1CreateTensorboardRunRequest" }, "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddExecutionEventsResponse": { - "description": "Response message for MetadataService.AddExecutionEvents.", - "id": "GoogleCloudAiplatformV1beta1AddExecutionEventsResponse", - "properties": {}, + "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsResponse": { + "description": "Response message for TensorboardService.BatchCreateTensorboardRuns.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsResponse", + "properties": { + "tensorboardRuns": { + "description": "The created TensorboardRuns.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1TensorboardRun" + }, + "type": "array" + } + }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AddTrialMeasurementRequest": { - "description": "Request message for VizierService.AddTrialMeasurement.", - "id": "GoogleCloudAiplatformV1beta1AddTrialMeasurementRequest", + "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesRequest": { + "description": "Request message for TensorboardService.BatchCreateTensorboardTimeSeries.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesRequest", "properties": { - "measurement": { - "$ref": "GoogleCloudAiplatformV1beta1Measurement", - "description": "Required. The measurement to be added to a Trial." + "requests": { + "description": "Required. The request message specifying the TensorboardTimeSeries to create. 
A maximum of 1000 TensorboardTimeSeries can be created in a batch.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1CreateTensorboardTimeSeriesRequest" + }, + "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1Annotation": { - "description": "Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem.", - "id": "GoogleCloudAiplatformV1beta1Annotation", + "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesResponse": { + "description": "Response message for TensorboardService.BatchCreateTensorboardTimeSeries.", + "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesResponse", "properties": { - "annotationSource": { - "$ref": "GoogleCloudAiplatformV1beta1UserActionReference", - "description": "Output only. The source of the Annotation.", - "readOnly": true - }, - "createTime": { - "description": "Output only. Timestamp when this Annotation was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "etag": { - "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" + "tensorboardTimeSeries": { + "description": "The created TensorboardTimeSeries.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1TensorboardTimeSeries" }, - "description": "Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with \"aiplatform.googleapis.com/\" and are immutable. 
Following system labels exist for each Annotation: * \"aiplatform.googleapis.com/annotation_set_name\": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * \"aiplatform.googleapis.com/payload_schema\": output only, its value is the payload_schema's title.", - "type": "object" - }, - "name": { - "description": "Output only. Resource name of the Annotation.", - "readOnly": true, - "type": "string" - }, - "payload": { - "description": "Required. The schema of the payload can be found in payload_schema.", - "type": "any" - }, - "payloadSchemaUri": { - "description": "Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata.", - "type": "string" - }, - "updateTime": { - "description": "Output only. Timestamp when this Annotation was last updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" + "type": "array" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1AnnotationSpec": { - "description": "Identifies a concept with which DataItems may be annotated with.", - "id": "GoogleCloudAiplatformV1beta1AnnotationSpec", + "GoogleCloudAiplatformV1beta1BatchDedicatedResources": { + "description": "A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration.", + "id": "GoogleCloudAiplatformV1beta1BatchDedicatedResources", "properties": { - "createTime": { - "description": "Output only. Timestamp when this AnnotationSpec was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "displayName": { - "description": "Required. 
The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters.", - "type": "string" - }, - "etag": { - "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", - "type": "string" + "machineSpec": { + "$ref": "GoogleCloudAiplatformV1beta1MachineSpec", + "description": "Required. Immutable. The specification of a single machine." }, - "name": { - "description": "Output only. Resource name of the AnnotationSpec.", - "readOnly": true, - "type": "string" + "maxReplicaCount": { + "description": "Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10.", + "format": "int32", + "type": "integer" }, - "updateTime": { - "description": "Output only. Timestamp when AnnotationSpec was last updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" + "startingReplicaCount": { + "description": "Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "GoogleCloudAiplatformV1beta1Artifact": { - "description": "Instance of a general artifact.", - "id": "GoogleCloudAiplatformV1beta1Artifact", - "properties": { - "createTime": { - "description": "Output only. Timestamp when this Artifact was created.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Description of the Artifact", - "type": "string" - }, - "displayName": { - "description": "User provided display name of the Artifact. May be up to 128 Unicode characters.", - "type": "string" - }, - "etag": { - "description": "An eTag used to perform consistent read-modify-write updates. 
If not set, a blind \"overwrite\" update happens.", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded).", - "type": "object" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - }, - "description": "Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB.", - "type": "object" - }, - "name": { - "description": "Output only. The resource name of the Artifact.", - "readOnly": true, - "type": "string" - }, - "schemaTitle": { - "description": "The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", - "type": "string" - }, - "schemaVersion": { - "description": "The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store.", - "type": "string" - }, - "state": { - "description": "The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. 
This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions.", - "enum": [ - "STATE_UNSPECIFIED", - "PENDING", - "LIVE" - ], - "enumDescriptions": [ - "Unspecified state for the Artifact.", - "A state used by systems like Vertex AI Pipelines to indicate that the underlying data item represented by this Artifact is being created.", - "A state indicating that the Artifact should exist, unless something external to the system deletes it." - ], - "type": "string" - }, - "updateTime": { - "description": "Output only. Timestamp when this Artifact was last updated.", - "format": "google-datetime", - "readOnly": true, - "type": "string" - }, - "uri": { - "description": "The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeOperationMetadata": { - "description": "Metadata information for NotebookService.AssignNotebookRuntime.", - "id": "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The operation generic information." - }, - "progressMessage": { - "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeRequest": { - "description": "Request message for NotebookService.AssignNotebookRuntime.", - "id": "GoogleCloudAiplatformV1beta1AssignNotebookRuntimeRequest", - "properties": { - "notebookRuntime": { - "$ref": "GoogleCloudAiplatformV1beta1NotebookRuntime", - "description": "Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment." 
- }, - "notebookRuntimeId": { - "description": "Optional. User specified ID for the notebook runtime.", - "type": "string" - }, - "notebookRuntimeTemplate": { - "description": "Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one).", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1Attribution": { - "description": "Attribution that explains a particular prediction output.", - "id": "GoogleCloudAiplatformV1beta1Attribution", - "properties": { - "approximationError": { - "description": "Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.", - "format": "double", - "readOnly": true, - "type": "number" - }, - "baselineOutputValue": { - "description": "Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.", - "format": "double", - "readOnly": true, - "type": "number" - }, - "featureAttributions": { - "description": "Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. 
The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).", - "readOnly": true, - "type": "any" - }, - "instanceOutputValue": { - "description": "Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.", - "format": "double", - "readOnly": true, - "type": "number" - }, - "outputDisplayName": { - "description": "Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index.", - "readOnly": true, - "type": "string" - }, - "outputIndex": { - "description": "Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. 
If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.", - "items": { - "format": "int32", - "type": "integer" - }, - "readOnly": true, - "type": "array" - }, - "outputName": { - "description": "Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AuthConfig": { - "description": "Auth configuration to run the extension.", - "id": "GoogleCloudAiplatformV1beta1AuthConfig", - "properties": { - "apiKeyConfig": { - "$ref": "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig", - "description": "Config for API key auth." - }, - "authType": { - "description": "Type of auth scheme.", - "enum": [ - "AUTH_TYPE_UNSPECIFIED", - "NO_AUTH", - "API_KEY_AUTH", - "HTTP_BASIC_AUTH", - "GOOGLE_SERVICE_ACCOUNT_AUTH", - "OAUTH", - "OIDC_AUTH" - ], - "enumDescriptions": [ - "", - "No Auth.", - "API Key Auth.", - "HTTP Basic Auth.", - "Google Service Account Auth.", - "OAuth auth.", - "OpenID Connect (OIDC) Auth." - ], - "type": "string" - }, - "googleServiceAccountConfig": { - "$ref": "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig", - "description": "Config for Google Service Account auth." - }, - "httpBasicAuthConfig": { - "$ref": "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig", - "description": "Config for HTTP Basic auth." - }, - "oauthConfig": { - "$ref": "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig", - "description": "Config for user oauth." - }, - "oidcConfig": { - "$ref": "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig", - "description": "Config for user OIDC auth." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig": { - "description": "Config for authentication with API key.", - "id": "GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig", - "properties": { - "apiKeySecret": { - "description": "Required. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", - "type": "string" - }, - "httpElementLocation": { - "description": "Required. The location of the API key.", - "enum": [ - "HTTP_IN_UNSPECIFIED", - "HTTP_IN_QUERY", - "HTTP_IN_HEADER", - "HTTP_IN_PATH", - "HTTP_IN_BODY", - "HTTP_IN_COOKIE" - ], - "enumDescriptions": [ - "", - "Element is in the HTTP request query.", - "Element is in the HTTP request header.", - "Element is in the HTTP request path.", - "Element is in the HTTP request body.", - "Element is in the HTTP request cookie." - ], - "type": "string" - }, - "name": { - "description": "Required. The parameter name of the API key. E.g. If the API request is \"https://example.com/act?api_key=\", \"api_key\" would be the parameter name.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig": { - "description": "Config for Google Service Account Authentication.", - "id": "GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig", - "properties": { - "serviceAccount": { - "description": "Optional. The service account that the extension execution service runs as. 
- If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig": { - "description": "Config for HTTP Basic Authentication.", - "id": "GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig", - "properties": { - "credentialSecret": { - "description": "Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig": { - "description": "Config for user oauth.", - "id": "GoogleCloudAiplatformV1beta1AuthConfigOauthConfig", - "properties": { - "accessToken": { - "description": "Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", - "type": "string" - }, - "serviceAccount": { - "description": "The service account used to generate access tokens for executing the Extension. 
- If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig": { - "description": "Config for user OIDC auth.", - "id": "GoogleCloudAiplatformV1beta1AuthConfigOidcConfig", - "properties": { - "idToken": { - "description": "OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", - "type": "string" - }, - "serviceAccount": { - "description": "The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AutomaticResources": { - "description": "A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines.", - "id": "GoogleCloudAiplatformV1beta1AutomaticResources", - "properties": { - "maxReplicaCount": { - "description": "Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. 
If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.", - "format": "int32", - "type": "integer" - }, - "minReplicaCount": { - "description": "Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AutoscalingMetricSpec": { - "description": "The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count.", - "id": "GoogleCloudAiplatformV1beta1AutoscalingMetricSpec", - "properties": { - "metricName": { - "description": "Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`", - "type": "string" - }, - "target": { - "description": "The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. 
The default value is 60 (representing 60%) if not provided.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1AvroSource": { - "description": "The storage details for Avro input content.", - "id": "GoogleCloudAiplatformV1beta1AvroSource", - "properties": { - "gcsSource": { - "$ref": "GoogleCloudAiplatformV1beta1GcsSource", - "description": "Required. Google Cloud Storage location." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsRequest": { - "description": "Request message for PipelineService.BatchCancelPipelineJobs.", - "id": "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsRequest", - "properties": { - "names": { - "description": "Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsResponse": { - "description": "Response message for PipelineService.BatchCancelPipelineJobs.", - "id": "GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsResponse", - "properties": { - "pipelineJobs": { - "description": "PipelineJobs cancelled.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1PipelineJob" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateFeaturesOperationMetadata": { - "description": "Details of operations that perform batch create Features.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for Feature." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest": { - "description": "Request message for FeaturestoreService.BatchCreateFeatures.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest", - "properties": { - "requests": { - "description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1CreateFeatureRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateFeaturesResponse": { - "description": "Response message for FeaturestoreService.BatchCreateFeatures.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesResponse", - "properties": { - "features": { - "description": "The Features created.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1Feature" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsRequest": { - "description": "Request message for TensorboardService.BatchCreateTensorboardRuns.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsRequest", - "properties": { - "requests": { - "description": "Required. The request message specifying the TensorboardRuns to create. 
A maximum of 1000 TensorboardRuns can be created in a batch.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1CreateTensorboardRunRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsResponse": { - "description": "Response message for TensorboardService.BatchCreateTensorboardRuns.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsResponse", - "properties": { - "tensorboardRuns": { - "description": "The created TensorboardRuns.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1TensorboardRun" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesRequest": { - "description": "Request message for TensorboardService.BatchCreateTensorboardTimeSeries.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesRequest", - "properties": { - "requests": { - "description": "Required. The request message specifying the TensorboardTimeSeries to create. 
A maximum of 1000 TensorboardTimeSeries can be created in a batch.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1CreateTensorboardTimeSeriesRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesResponse": { - "description": "Response message for TensorboardService.BatchCreateTensorboardTimeSeries.", - "id": "GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesResponse", - "properties": { - "tensorboardTimeSeries": { - "description": "The created TensorboardTimeSeries.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1TensorboardTimeSeries" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchDedicatedResources": { - "description": "A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration.", - "id": "GoogleCloudAiplatformV1beta1BatchDedicatedResources", - "properties": { - "machineSpec": { - "$ref": "GoogleCloudAiplatformV1beta1MachineSpec", - "description": "Required. Immutable. The specification of a single machine." - }, - "maxReplicaCount": { - "description": "Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10.", - "format": "int32", - "type": "integer" - }, - "startingReplicaCount": { - "description": "Immutable. The number of machine replicas used at the start of the batch operation. 
If not set, Vertex AI decides starting number, not greater than max_replica_count", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsRequest": { - "description": "Request message for PipelineService.BatchDeletePipelineJobs.", - "id": "GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsRequest", + "GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsRequest": { + "description": "Request message for PipelineService.BatchDeletePipelineJobs.", + "id": "GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsRequest", "properties": { "names": { "description": "Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}`", @@ -27036,6 +26204,22 @@ "description": "Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag.", "format": "int32", "type": "integer" + }, + "rrf": { + "$ref": "GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF", + "description": "Optional. Represents RRF algorithm that combines search results." + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF": { + "description": "Parameters for RRF algorithm that combines search results.", + "id": "GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF", + "properties": { + "alpha": { + "description": "Required. Users can provide an alpha value to give more weight to dense vs sparse results. 
For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense.", + "format": "float", + "type": "number" } }, "type": "object" @@ -27084,6 +26268,11 @@ "description": "The distance between the neighbor and the dense embedding query.", "format": "double", "type": "number" + }, + "sparseDistance": { + "description": "The distance between the neighbor and the query sparse_embedding.", + "format": "double", + "type": "number" } }, "type": "object" @@ -28248,6 +27437,10 @@ "$ref": "GoogleCloudAiplatformV1beta1IndexDatapointRestriction" }, "type": "array" + }, + "sparseEmbedding": { + "$ref": "GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding", + "description": "Optional. Feature embedding vector for sparse index." } }, "type": "object" @@ -28336,6 +27529,29 @@ }, "type": "object" }, + "GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding": { + "description": "Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions.", + "id": "GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding", + "properties": { + "dimensions": { + "description": "Required. The list of indexes for the embedding values of the sparse vector.", + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "values": { + "description": "Required. The list of embedding values of the sparse vector.", + "items": { + "format": "float", + "type": "number" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudAiplatformV1beta1IndexEndpoint": { "description": "Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes.", "id": "GoogleCloudAiplatformV1beta1IndexEndpoint", @@ -28448,6 +27664,12 @@ "readOnly": true, "type": "integer" }, + "sparseVectorsCount": { + "description": "Output only. The number of sparse vectors in the Index.", + "format": "int64", + "readOnly": true, + "type": "string" + }, "vectorsCount": { "description": "Output only. 
The number of dense vectors in the Index.", "format": "int64", @@ -32551,7 +31773,9 @@ "MULTIPLE_VALUES", "INVALID_NUMERIC_VALUE", "INVALID_ENCODING", - "INVALID_TOKEN_VALUE" + "INVALID_SPARSE_DIMENSIONS", + "INVALID_TOKEN_VALUE", + "INVALID_SPARSE_EMBEDDING" ], "enumDescriptions": [ "Default, shall not be used.", @@ -32568,7 +31792,9 @@ "Numeric restrict has multiple values specified.", "Numeric restrict has invalid numeric value specified.", "File is not in UTF_8 format.", - "Token restrict value is invalid." + "Error parsing sparse dimensions field.", + "Token restrict value is invalid.", + "Invalid sparse embedding." ], "type": "string" }, @@ -43545,6338 +42771,879 @@ "description": "Details of operations that perform update FeatureGroup.", "id": "GoogleCloudAiplatformV1beta1UpdateFeatureGroupOperationMetadata", "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for FeatureGroup." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateFeatureOnlineStoreOperationMetadata": { - "description": "Details of operations that perform update FeatureOnlineStore.", - "id": "GoogleCloudAiplatformV1beta1UpdateFeatureOnlineStoreOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for FeatureOnlineStore." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateFeatureOperationMetadata": { - "description": "Details of operations that perform update Feature.", - "id": "GoogleCloudAiplatformV1beta1UpdateFeatureOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for Feature Update." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateFeatureViewOperationMetadata": { - "description": "Details of operations that perform update FeatureView.", - "id": "GoogleCloudAiplatformV1beta1UpdateFeatureViewOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for FeatureView Update." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateFeaturestoreOperationMetadata": { - "description": "Details of operations that perform update Featurestore.", - "id": "GoogleCloudAiplatformV1beta1UpdateFeaturestoreOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for Featurestore." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateIndexOperationMetadata": { - "description": "Runtime operation information for IndexService.UpdateIndex.", - "id": "GoogleCloudAiplatformV1beta1UpdateIndexOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The operation generic information." - }, - "nearestNeighborSearchOperationMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata", - "description": "The operation metadata with regard to Matching Engine Index operation." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateModelDeploymentMonitoringJobOperationMetadata": { - "description": "Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob.", - "id": "GoogleCloudAiplatformV1beta1UpdateModelDeploymentMonitoringJobOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The operation generic information." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateModelMonitorOperationMetadata": { - "description": "Runtime operation information for ModelMonitoringService.UpdateModelMonitor.", - "id": "GoogleCloudAiplatformV1beta1UpdateModelMonitorOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The operation generic information." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdatePersistentResourceOperationMetadata": { - "description": "Details of operations that perform update PersistentResource.", - "id": "GoogleCloudAiplatformV1beta1UpdatePersistentResourceOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for PersistentResource." - }, - "progressMessage": { - "description": "Progress Message for Update LRO", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateSpecialistPoolOperationMetadata": { - "description": "Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool.", - "id": "GoogleCloudAiplatformV1beta1UpdateSpecialistPoolOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The operation generic information." - }, - "specialistPool": { - "description": "Output only. The name of the SpecialistPool to which the specialists are being added. 
Format: `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpdateTensorboardOperationMetadata": { - "description": "Details of operations that perform update Tensorboard.", - "id": "GoogleCloudAiplatformV1beta1UpdateTensorboardOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "Operation metadata for Tensorboard." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeOperationMetadata": { - "description": "Metadata information for NotebookService.UpgradeNotebookRuntime.", - "id": "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The operation generic information." - }, - "progressMessage": { - "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeRequest": { - "description": "Request message for NotebookService.UpgradeNotebookRuntime.", - "id": "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeRequest", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UploadModelOperationMetadata": { - "description": "Details of ModelService.UploadModel operation.", - "id": "GoogleCloudAiplatformV1beta1UploadModelOperationMetadata", - "properties": { - "genericMetadata": { - "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", - "description": "The common part of the operation metadata." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UploadModelRequest": { - "description": "Request message for ModelService.UploadModel.", - "id": "GoogleCloudAiplatformV1beta1UploadModelRequest", - "properties": { - "model": { - "$ref": "GoogleCloudAiplatformV1beta1Model", - "description": "Required. The Model to create." - }, - "modelId": { - "description": "Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen.", - "type": "string" - }, - "parentModel": { - "description": "Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version.", - "type": "string" - }, - "serviceAccount": { - "description": "Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.).", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UploadModelResponse": { - "description": "Response message of ModelService.UploadModel operation.", - "id": "GoogleCloudAiplatformV1beta1UploadModelResponse", - "properties": { - "model": { - "description": "The name of the uploaded Model resource. Format: `projects/{project}/locations/{location}/models/{model}`", - "type": "string" - }, - "modelVersionId": { - "description": "Output only. 
The version ID of the model that is uploaded.", - "readOnly": true, - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UploadRagFileConfig": { - "description": "Config for uploading RagFile.", - "id": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", - "properties": { - "ragFileChunkingConfig": { - "$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", - "description": "Specifies the size and overlap of chunks after uploading RagFile." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UploadRagFileRequest": { - "description": "Request message for VertexRagDataService.UploadRagFile.", - "id": "GoogleCloudAiplatformV1beta1UploadRagFileRequest", - "properties": { - "ragFile": { - "$ref": "GoogleCloudAiplatformV1beta1RagFile", - "description": "Required. The RagFile to upload." - }, - "uploadRagFileConfig": { - "$ref": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", - "description": "Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile." - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UploadRagFileResponse": { - "description": "Response message for VertexRagDataService.UploadRagFile.", - "id": "GoogleCloudAiplatformV1beta1UploadRagFileResponse", - "properties": { - "error": { - "$ref": "GoogleRpcStatus", - "description": "The error that occurred while processing the RagFile." - }, - "ragFile": { - "$ref": "GoogleCloudAiplatformV1beta1RagFile", - "description": "The RagFile that had been uploaded into the RagCorpus." 
- } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest": { - "description": "Request message for IndexService.UpsertDatapoints", - "id": "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest", - "properties": { - "datapoints": { - "description": "A list of datapoints to be created/updated.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1IndexDatapoint" - }, - "type": "array" - }, - "updateMask": { - "description": "Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts.", - "format": "google-fieldmask", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UpsertDatapointsResponse": { - "description": "Response message for IndexService.UpsertDatapoints", - "id": "GoogleCloudAiplatformV1beta1UpsertDatapointsResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1UserActionReference": { - "description": "References an API call. It contains more information about long running operation and Jobs that are triggered by the API call.", - "id": "GoogleCloudAiplatformV1beta1UserActionReference", - "properties": { - "dataLabelingJob": { - "description": "For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`", - "type": "string" - }, - "method": { - "description": "The method name of the API RPC call. For example, \"/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset\"", - "type": "string" - }, - "operation": { - "description": "For API calls that return a long running operation. Resource name of the long running operation. 
Format: `projects/{project}/locations/{location}/operations/{operation}`", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1Value": { - "description": "Value is the value of the field.", - "id": "GoogleCloudAiplatformV1beta1Value", - "properties": { - "doubleValue": { - "description": "A double value.", - "format": "double", - "type": "number" - }, - "intValue": { - "description": "An integer value.", - "format": "int64", - "type": "string" - }, - "stringValue": { - "description": "A string value.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1VertexAISearch": { - "description": "Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation", - "id": "GoogleCloudAiplatformV1beta1VertexAISearch", - "properties": { - "datastore": { - "description": "Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1VertexRagStore": { - "description": "Retrieve from Vertex RAG Store for grounding.", - "id": "GoogleCloudAiplatformV1beta1VertexRagStore", - "properties": { - "ragCorpora": { - "deprecated": true, - "description": "Optional. Deprecated. Please use rag_resources instead.", - "items": { - "type": "string" - }, - "type": "array" - }, - "ragResources": { - "description": "Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource" - }, - "type": "array" - }, - "similarityTopK": { - "description": "Optional. 
Number of top k results to return from the selected corpora.", - "format": "int32", - "type": "integer" - }, - "vectorDistanceThreshold": { - "description": "Optional. Only return results with vector distance smaller than the threshold.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource": { - "description": "The definition of the Rag resource.", - "id": "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource", - "properties": { - "ragCorpus": { - "description": "Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", - "type": "string" - }, - "ragFileIds": { - "description": "Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1VideoMetadata": { - "description": "Metadata describes the input video content.", - "id": "GoogleCloudAiplatformV1beta1VideoMetadata", - "properties": { - "endOffset": { - "description": "Optional. The end offset of the video.", - "format": "google-duration", - "type": "string" - }, - "startOffset": { - "description": "Optional. The start offset of the video.", - "format": "google-duration", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WorkerPoolSpec": { - "description": "Represents the spec of a worker pool in a job.", - "id": "GoogleCloudAiplatformV1beta1WorkerPoolSpec", - "properties": { - "containerSpec": { - "$ref": "GoogleCloudAiplatformV1beta1ContainerSpec", - "description": "The custom container task." - }, - "diskSpec": { - "$ref": "GoogleCloudAiplatformV1beta1DiskSpec", - "description": "Disk spec." - }, - "machineSpec": { - "$ref": "GoogleCloudAiplatformV1beta1MachineSpec", - "description": "Optional. Immutable. The specification of a single machine." - }, - "nfsMounts": { - "description": "Optional. 
List of NFS mount spec.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1NfsMount" - }, - "type": "array" - }, - "pythonPackageSpec": { - "$ref": "GoogleCloudAiplatformV1beta1PythonPackageSpec", - "description": "The Python packaged task." - }, - "replicaCount": { - "description": "Optional. The number of worker replicas to use for this worker pool.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload": { - "description": "Contains Feature values to be written for a specific entity.", - "id": "GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload", - "properties": { - "entityId": { - "description": "Required. The ID of the entity.", - "type": "string" - }, - "featureValues": { - "additionalProperties": { - "$ref": "GoogleCloudAiplatformV1beta1FeatureValue" - }, - "description": "Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future.", - "type": "object" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteFeatureValuesRequest": { - "description": "Request message for FeaturestoreOnlineServingService.WriteFeatureValues.", - "id": "GoogleCloudAiplatformV1beta1WriteFeatureValuesRequest", - "properties": { - "payloads": { - "description": "Required. The entities to be written. 
Up to 100,000 feature values can be written across all `payloads`.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteFeatureValuesResponse": { - "description": "Response message for FeaturestoreOnlineServingService.WriteFeatureValues.", - "id": "GoogleCloudAiplatformV1beta1WriteFeatureValuesResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataRequest": { - "description": "Request message for TensorboardService.WriteTensorboardExperimentData.", - "id": "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataRequest", - "properties": { - "writeRunDataRequests": { - "description": "Required. Requests containing per-run TensorboardTimeSeries data to write.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataResponse": { - "description": "Response message for TensorboardService.WriteTensorboardExperimentData.", - "id": "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest": { - "description": "Request message for TensorboardService.WriteTensorboardRunData.", - "id": "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest", - "properties": { - "tensorboardRun": { - "description": "Required. The resource name of the TensorboardRun to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`", - "type": "string" - }, - "timeSeriesData": { - "description": "Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. 
The upper limit of data points per write request is 5000.", - "items": { - "$ref": "GoogleCloudAiplatformV1beta1TimeSeriesData" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataResponse": { - "description": "Response message for TensorboardService.WriteTensorboardRunData.", - "id": "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataResponse", - "properties": {}, - "type": "object" - }, - "GoogleCloudAiplatformV1beta1XraiAttribution": { - "description": "An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models.", - "id": "GoogleCloudAiplatformV1beta1XraiAttribution", - "properties": { - "blurBaselineConfig": { - "$ref": "GoogleCloudAiplatformV1beta1BlurBaselineConfig", - "description": "Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383" - }, - "smoothGradConfig": { - "$ref": "GoogleCloudAiplatformV1beta1SmoothGradConfig", - "description": "Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf" - }, - "stepCount": { - "description": "Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. 
Valid range of its value is [1, 100], inclusively.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleCloudLocationListLocationsResponse": { - "description": "The response message for Locations.ListLocations.", - "id": "GoogleCloudLocationListLocationsResponse", - "properties": { - "locations": { - "description": "A list of locations that matches the specified filter in the request.", - "items": { - "$ref": "GoogleCloudLocationLocation" - }, - "type": "array" - }, - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleCloudLocationLocation": { - "description": "A resource that represents a Google Cloud location.", - "id": "GoogleCloudLocationLocation", - "properties": { - "displayName": { - "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", - "type": "object" - }, - "locationId": { - "description": "The canonical id for this location. For example: `\"us-east1\"`.", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata. For example the available capacity at the given location.", - "type": "object" - }, - "name": { - "description": "Resource name for the location, which may vary between implementations. 
For example: `\"projects/example-project/locations/us-east1\"`", - "type": "string" - } - }, - "type": "object" - }, - "GoogleIamV1Binding": { - "description": "Associates `members`, or principals, with a `role`.", - "id": "GoogleIamV1Binding", - "properties": { - "condition": { - "$ref": "GoogleTypeExpr", - "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." - }, - "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. 
* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. 
For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.", - "items": { - "type": "string" - }, - "type": "array" - }, - "role": { - "description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).", - "type": "string" - } - }, - "type": "object" - }, - "GoogleIamV1GetIamPolicyRequest": { - "description": "Request message for `GetIamPolicy` method.", - "id": "GoogleIamV1GetIamPolicyRequest", - "properties": { - "options": { - "$ref": "GoogleIamV1GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." - } - }, - "type": "object" - }, - "GoogleIamV1GetPolicyOptions": { - "description": "Encapsulates settings provided to GetIamPolicy.", - "id": "GoogleIamV1GetPolicyOptions", - "properties": { - "requestedPolicyVersion": { - "description": "Optional. 
The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleIamV1Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", - "id": "GoogleIamV1Policy", - "properties": { - "bindings": { - "description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. 
For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.", - "items": { - "$ref": "GoogleIamV1Binding" - }, - "type": "array" - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", - "format": "byte", - "type": "string" - }, - "version": { - "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. 
If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleIamV1SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "id": "GoogleIamV1SetIamPolicyRequest", - "properties": { - "policy": { - "$ref": "GoogleIamV1Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." - } - }, - "type": "object" - }, - "GoogleIamV1TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "id": "GoogleIamV1TestIamPermissionsRequest", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. 
For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleIamV1TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", - "id": "GoogleIamV1TestIamPermissionsResponse", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleLongrunningListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", - "id": "GoogleLongrunningListOperationsResponse", - "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - }, - "operations": { - "description": "A list of operations that matches the specified filter in the request.", - "items": { - "$ref": "GoogleLongrunningOperation" - }, - "type": "array" - } - }, - "type": "object" - }, - "GoogleLongrunningOperation": { - "description": "This resource represents a long-running operation that is the result of a network API call.", - "id": "GoogleLongrunningOperation", - "properties": { - "done": { - "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", - "type": "boolean" - }, - "error": { - "$ref": "GoogleRpcStatus", - "description": "The error result of the operation in case of failure or cancellation." - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. 
Any method that returns a long-running operation should document the metadata type, if any.", - "type": "object" - }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", - "type": "string" - }, - "response": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", - "type": "object" - } - }, - "type": "object" - }, - "GoogleProtobufEmpty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", - "id": "GoogleProtobufEmpty", - "properties": {}, - "type": "object" - }, - "GoogleRpcStatus": { - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", - "id": "GoogleRpcStatus", - "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - }, - "details": { - "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleTypeColor": { - "description": "Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... 
public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... 
var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ...", - "id": "GoogleTypeColor", - "properties": { - "alpha": { - "description": "The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. 
If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0).", - "format": "float", - "type": "number" - }, - "blue": { - "description": "The amount of blue in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - }, - "green": { - "description": "The amount of green in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - }, - "red": { - "description": "The amount of red in the color as a value in the interval [0, 1].", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "GoogleTypeDate": { - "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp", - "id": "GoogleTypeDate", - "properties": { - "day": { - "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.", - "format": "int32", - "type": "integer" - }, - "month": { - "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.", - "format": "int32", - "type": "integer" - }, - "year": { - "description": "Year of the date. 
Must be from 1 to 9999, or 0 to specify a date without a year.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "GoogleTypeExpr": { - "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", - "id": "GoogleTypeExpr", - "properties": { - "description": { - "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", - "type": "string" - }, - "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax.", - "type": "string" - }, - "location": { - "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - "type": "string" - }, - "title": { - "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", - "type": "string" - } - }, - "type": "object" - }, - "GoogleTypeInterval": { - "description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", - "id": "GoogleTypeInterval", - "properties": { - "endTime": { - "description": "Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start.", - "format": "google-datetime", - "type": "string" - } - }, - "type": "object" - }, - "GoogleTypeMoney": { - "description": "Represents an amount of money with its currency type.", - "id": "GoogleTypeMoney", - "properties": { - "currencyCode": { - "description": "The three-letter currency code defined in ISO 4217.", - "type": "string" - }, - "nanos": { - "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", - "format": "int32", - "type": "integer" - }, - "units": { - "description": "The whole units of the amount. 
For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "IntelligenceCloudAutomlXpsMetricEntry": { - "id": "IntelligenceCloudAutomlXpsMetricEntry", - "properties": { - "argentumMetricId": { - "description": "For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the \"cloudbilling.googleapis.com/argentum_metric_id\" label. Otherwise leave empty.", - "type": "string" - }, - "doubleValue": { - "description": "A double value.", - "format": "double", - "type": "number" - }, - "int64Value": { - "description": "A signed 64-bit integer value.", - "format": "int64", - "type": "string" - }, - "metricName": { - "description": "The metric name defined in the service configuration.", - "type": "string" - }, - "systemLabels": { - "description": "Billing system labels for this (metric, value) pair.", - "items": { - "$ref": "IntelligenceCloudAutomlXpsMetricEntryLabel" - }, - "type": "array" - } - }, - "type": "object" - }, - "IntelligenceCloudAutomlXpsMetricEntryLabel": { - "id": "IntelligenceCloudAutomlXpsMetricEntryLabel", - "properties": { - "labelName": { - "description": "The name of the label.", - "type": "string" - }, - "labelValue": { - "description": "The value of the label.", - "type": "string" - } - }, - "type": "object" - }, - "IntelligenceCloudAutomlXpsReportingMetrics": { - "id": "IntelligenceCloudAutomlXpsReportingMetrics", - "properties": { - "effectiveTrainingDuration": { - "deprecated": true, - "description": "The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set.", - "format": "google-duration", - "type": "string" - }, - "metricEntries": { - "description": "One entry per metric name. 
The values must be aggregated per metric name.", - "items": { - "$ref": "IntelligenceCloudAutomlXpsMetricEntry" - }, - "type": "array" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoDocAttribution": { - "description": "The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30", - "id": "LanguageLabsAidaTrustRecitationProtoDocAttribution", - "properties": { - "amarnaId": { - "type": "string" - }, - "arxivId": { - "type": "string" - }, - "author": { - "type": "string" - }, - "bibkey": { - "type": "string" - }, - "biorxivId": { - "description": "ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517", - "type": "string" - }, - "bookTitle": { - "type": "string" - }, - "bookVolumeId": { - "description": "The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set.", - "format": "int64", - "type": "string" - }, - "category": { - "enum": [ - "CATEGORY_UNSPECIFIED", - "CATEGORY_NEWS", - "CATEGORY_NON_NEWS_WEBDOC", - "CATEGORY_UNKNOWN_MISSING_SIGNAL" - ], - "enumDescriptions": [ - "", - "The doc has a url and the news classifier has classified this doc as news.", - "The doc has a url and the news classifier classified this doc as non-news.", - "The doc has a url but the url was missing from the news classifier URL table." 
- ], - "type": "string" - }, - "conversationId": { - "type": "string" - }, - "dataset": { - "description": "The dataset this document comes from.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - "LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - "PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - "GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - 
"CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - "GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - 
"GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - "DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - "DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - "LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - 
"GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - 
"GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - "GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", - "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - "GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - 
"GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - 
"GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article 
Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. 
Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Github dataset with license info. We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset.", - "Mobile assistant finetune datasets.", - "", - "Genesis fine-tune datasets.", - "Cloud Security fine-tune datasets.", - "", - "", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini 
V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inpsiration." - ], - "type": "string" - }, - "filepath": { - "type": "string" - }, - "geminiId": { - "type": "string" - }, - "gnewsArticleTitle": { - "type": "string" - }, - "goodallExampleId": { - "type": "string" - }, - "isOptOut": { - "description": "Whether the document is opted out.", - "type": "boolean" - }, - "isPrompt": { - "type": "boolean" - }, - "lamdaExampleId": { - "type": "string" - }, - "license": { - "type": "string" - }, - "meenaConversationId": { - "type": "string" - }, - "naturalLanguageCode": { - "description": "Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii.", - "type": "string" - }, - "noAttribution": { - "description": "True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available.", - "type": "boolean" - }, - "podcastUtteranceId": { - "type": "string" - }, - "publicationDate": { - "$ref": "GoogleTypeDate" - }, - "qualityScoreExperimentOnly": { - "description": "This field is for opt-out experiment only, MUST never be used during actual production/serving. 
", - "format": "double", - "type": "number" - }, - "repo": { - "description": "Github repository", - "type": "string" - }, - "url": { - "description": "URL of a webdoc", - "type": "string" - }, - "volumeId": { - "type": "string" - }, - "wikipediaArticleTitle": { - "description": "Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset.", - "type": "string" - }, - "youtubeVideoId": { - "description": "The unique video id from Youtube. Example: AkoGsW52Ir0", - "type": "string" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoRecitationResult": { - "description": "The recitation result for one input", - "id": "LanguageLabsAidaTrustRecitationProtoRecitationResult", - "properties": { - "dynamicSegmentResults": { - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - }, - "recitationAction": { - "description": "The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. 
When the given input is not found in any source, the recitation action will not be specified.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "trainingSegmentResults": { - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoSegmentResult": { - "description": "The recitation result for each segment in a given input.", - "id": "LanguageLabsAidaTrustRecitationProtoSegmentResult", - "properties": { - "attributionDataset": { - "description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - 
"LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - "PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - "GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - "CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - 
"GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - "GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - "GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - 
"DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - "DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - "LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - "GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - 
"GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - 
"GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", - "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - "GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - "GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - 
"GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. 
http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Github dataset with license info. 
We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset.", - "Mobile assistant finetune datasets.", - "", - "Genesis fine-tune datasets.", - "Cloud Security fine-tune datasets.", - "", - "", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini 
V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inpsiration." - ], - "type": "string" - }, - "displayAttributionMessage": { - "description": "human-friendly string that contains information from doc_attribution which could be shown by clients", - "type": "string" - }, - "docAttribution": { - "$ref": "LanguageLabsAidaTrustRecitationProtoDocAttribution" - }, - "docOccurrences": { - "description": "number of documents that contained this segment", - "format": "int32", - "type": "integer" - }, - "endIndex": { - "format": "int32", - "type": "integer" - }, - "rawText": { - "description": "The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options.", - "type": "string" - }, - "segmentRecitationAction": { - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "sourceCategory": { - "description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", - "enum": [ - "SOURCE_CATEGORY_UNSPECIFIED", - "SOURCE_CATEGORY_WIKIPEDIA", - "SOURCE_CATEGORY_WEBDOCS", - "SOURCE_CATEGORY_GITHUB", - "SOURCE_CATEGORY_ARXIV", - "SOURCE_CATEGORY_PRIVATE_BOOKS", - "SOURCE_CATEGORY_OTHERS", - "SOURCE_CATEGORY_PUBLIC_BOOKS", - "SOURCE_CATEGORY_GNEWS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "startIndex": { - "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. 
In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "LanguageLabsAidaTrustRecitationProtoStreamRecitationResult": { - "description": "The recitation result for one stream input", - "id": "LanguageLabsAidaTrustRecitationProtoStreamRecitationResult", - "properties": { - "dynamicSegmentResults": { - "description": "The recitation result against the given dynamic data source.", - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - }, - "fullyCheckedTextIndex": { - "description": "Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation.", - "format": "int32", - "type": "integer" - }, - "recitationAction": { - "description": "The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "trainingSegmentResults": { - "description": "The recitation result against model training data.", - "items": { - "$ref": "LanguageLabsAidaTrustRecitationProtoSegmentResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRecitationContentChunkRecitationCheckResult": { - "description": "Recitation check result for a single content chunk.", - "id": "LearningGenaiRecitationContentChunkRecitationCheckResult", - "properties": { - "imageResult": { - 
"$ref": "LearningGenaiRecitationImageRecitationCheckResult" - }, - "textResult": { - "$ref": "LearningGenaiRecitationRecitationResult" - } - }, - "type": "object" - }, - "LearningGenaiRecitationDocAttribution": { - "description": "The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30", - "id": "LearningGenaiRecitationDocAttribution", - "properties": { - "amarnaId": { - "type": "string" - }, - "arxivId": { - "type": "string" - }, - "author": { - "type": "string" - }, - "bibkey": { - "type": "string" - }, - "biorxivId": { - "description": "ID of the paper in bioarxiv like ddoi.org/{biorxiv_id} eg: https://doi.org/10.1101/343517", - "type": "string" - }, - "bookTitle": { - "type": "string" - }, - "bookVolumeId": { - "description": "The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. 
Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set.", - "format": "int64", - "type": "string" - }, - "conversationId": { - "type": "string" - }, - "dataset": { - "description": "The dataset this document comes from.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - "LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - "PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - 
"GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - "CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - 
"GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - "GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - "DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - "DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - 
"LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - "GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - 
"GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - "GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", - "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - 
"GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - "GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - 
"GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - 
"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. 
http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "GitHub dataset with license info. We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset", - "Mobile assistant finetune datasets.", - "", - "Genesis fine tuned datasets.", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Cloud Security fine tuned datasets.", - "", - "", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inspiration FT datasets." - ], - "type": "string" - }, - "filepath": { - "type": "string" - }, - "geminiId": { - "type": "string" - }, - "gnewsArticleTitle": { - "type": "string" - }, - "goodallExampleId": { - "type": "string" - }, - "isOptOut": { - "description": "Whether the document is opted out.", - "type": "boolean" - }, - "isPrompt": { - "description": "When true, this attribution came from the user's prompt.", - "type": "boolean" - }, - "lamdaExampleId": { - "type": "string" - }, - "license": { - "type": "string" - }, - "meenaConversationId": { - "type": "string" - }, - "naturalLanguageCode": { - "description": "Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii.", - "type": "string" - }, - "noAttribution": { - "description": "True if this doc has no attribution information available. 
We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available.", - "type": "boolean" - }, - "podcastUtteranceId": { - "type": "string" - }, - "publicationDate": { - "$ref": "GoogleTypeDate" - }, - "qualityScoreExperimentOnly": { - "description": "This field is for opt-out experiment only, MUST never be used during actual production/serving. ", - "format": "double", - "type": "number" - }, - "repo": { - "description": "Github repository", - "type": "string" - }, - "url": { - "description": "URL of a webdoc", - "type": "string" - }, - "volumeId": { - "type": "string" - }, - "wikipediaArticleTitle": { - "description": "Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset.", - "type": "string" - }, - "youtubeVideoId": { - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRecitationImageDocAttribution": { - "description": "Attribution information about the recited image.", - "id": "LearningGenaiRecitationImageDocAttribution", - "properties": { - "datasetName": { - "description": "Unique ID of the image.", - "enum": [ - "IMAGE_DATA_SET_UNSPECIFIED", - "JUNO_SHUTTERSTOCK_V1", - "JUNO_V1_HIPR", - "JUNO_V1_WEBLI_AESTHETICS_V2_4_5", - "JUNO_V1_TIGG_DATA_V1", - "JUNO_V1_PINTEREST_A", - "JUNO_V1_PINTEREST_B", - "JUNO_V1_IMAGEJOINS", - "JUNO_V1_M1", - "JUNO_V1_M2_MISC", - "JUNO_V1_M2_ART", - "JUNO_V1_DEVIANTAR", - "IMAGE_PASSAGE" - ], - "enumDescriptions": [ - "", - "go/tigg-shutterstock", - "go/hipr-in-tigg-notes", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "go/image-passage" - ], - "type": "string" - }, - "stringDocids": { - "description": "Doc ID to identify the 
image. These could be urls of images or amarna id.", - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRecitationImageRecitationCheckResult": { - "id": "LearningGenaiRecitationImageRecitationCheckResult", - "properties": { - "recitationAction": { - "description": "Only has NO_ACTION or BLOCK to start with.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "recitedImages": { - "description": "Images that are similar to the requested image.", - "items": { - "$ref": "LearningGenaiRecitationImageRecitationCheckResultSimilarImage" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRecitationImageRecitationCheckResultSimilarImage": { - "id": "LearningGenaiRecitationImageRecitationCheckResultSimilarImage", - "properties": { - "docAttribution": { - "$ref": "LearningGenaiRecitationImageDocAttribution", - "description": "Attribution information about the image" - }, - "embeddingModel": { - "description": "The memorization embedding model that returned this image", - "enum": [ - "EMBEDDING_MODEL_UNSPECIFIED", - "STARBURST_V4", - "REISIM" - ], - "enumDescriptions": [ - "", - "Starburst V4, 64 float features.", - "Reisim, 128 Byte float feature" - ], - "type": "string" - }, - "imageId": { - "description": "Image ID corresponding of the image corresponding to the score. 
`image_id` serves for debugging purposes and can't be used by clients to retrieve an image.", - "format": "uint64", - "type": "string" - }, - "scores": { - "description": "Similarity score of requested image compared with image in training data.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRecitationMMRecitationCheckResult": { - "description": "Recitation check result for a stream of content chunks (e.g. a model response).", - "id": "LearningGenaiRecitationMMRecitationCheckResult", - "properties": { - "chunkResults": { - "items": { - "$ref": "LearningGenaiRecitationContentChunkRecitationCheckResult" - }, - "type": "array" - }, - "recitationAction": { - "description": "Overall recommended recitation action for the content.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRecitationRecitationResult": { - "description": "The recitation result for one input", - "id": "LearningGenaiRecitationRecitationResult", - "properties": { - "dynamicSegmentResults": { - "items": { - "$ref": "LearningGenaiRecitationSegmentResult" - }, - "type": "array" - }, - "recitationAction": { - "description": "The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK > CITE > NO_ACTION. 
When the given input is not found in any source, the recitation action will be NO_ACTION.", - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "trainingSegmentResults": { - "items": { - "$ref": "LearningGenaiRecitationSegmentResult" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRecitationSegmentResult": { - "description": "The recitation result for each segment in a given input.", - "id": "LearningGenaiRecitationSegmentResult", - "properties": { - "attributionDataset": { - "description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", - "enum": [ - "DATASET_UNSPECIFIED", - "WIKIPEDIA", - "WEBDOCS", - "WEBDOCS_FINETUNE", - "GITHUB_MIRROR", - "BOOKS_FULL_VIEW", - "BOOKS_PRIVATE", - "GNEWS", - "ULM_DOCJOINS", - "ULM_DOCJOINS_DEDUPED", - "MEENA_FC", - "PODCAST", - "AQUA", - "WEB_ASR", - "BARD_GOLDEN", - "COMMON_SENSE_REASONING", - "MATH", - "MATH_REASONING", - "CLEAN_ARXIV", - "LAMDA_FACTUALITY_E2E_QUERY_GENERATION", - "LAMDA_FACTUALITY_E2E_RESPONSE_GENERATION", - "MASSIVE_FORUM_THREAD_SCORED_BARD", - "MASSIVE_FORUM_THREAD_SCORED_LONG_200", - "MASSIVE_FORUM_THREAD_SCORED_LONG_500", - "DOCUMENT_CHUNKS", - "MEENA_RESEARCH_PHASE_GOLDEN_MARKDOWN", - "MEENA_RESEARCH_PHASE_GOOGLERS", - "MEENA_RESPONSE_SAFETY_HUMAN_GEN", - "MEENA_RESPONSE_SAFETY_SCHEMA_NO_BROADCAST", - "MEENA_RESPONSE_SAFETY_V3_HUMAN_GEN2", - "MEENA_RESPONSE_SAFETY_V3_SCHEMA_NO_BROADCAST", - "LAMDA_FACTUALITY_TRIGGER", - "LAMDA_SAFETY_V2_SCHEMA_NO_BROADCAST", - "LAMDA_SSI_DISCRIMINATIVE", - "ASSISTANT_PERSONALITY_SAFETY", - 
"PODCAST_FINETUNE_DIALOG", - "WORLD_QUERY_GENERATOR", - "C4_JOINED_DOCJOINS", - "HOL4_THEORIES", - "HOL_LIGHT_THEORIES", - "HOLSTEPS", - "ISABELLE_STEP", - "ISABELLE_THEORIES", - "LEAN_MATHLIB_THEORIES", - "LEAN_STEP", - "MIZAR_THEORIES", - "COQ_STEP", - "COQ_THEORIES", - "AMPS_KHAN", - "AMPS_MATHEMATICA", - "CODEY_CODE", - "CODE_QA_SE", - "CODE_QA_SO", - "CODE_QA_FT_FORMAT", - "CODE_QA_FT_KNOWLEDGE", - "CODE_QA_GITHUB_FILTERED_CODE", - "BARD_PERSONALITY_GOLDEN", - "ULM_DOCJOINS_WITH_URLS_EN", - "ULM_DOCJOINS_WITH_URLS_I18N", - "GOODALL_MTV5_GITHUB", - "GOODALL_MTV5_BOOKS", - "GOODALL_MTV5_C4", - "GOODALL_MTV5_WIKIPEDIA", - "GOODALL_MW_TOP_100B", - "GOODALL_MW_STACK_EXCHANGE", - "GOODALL_MW_TOP_0_10B", - "GOODALL_MW_TOP_10B_20B", - "CODEY_NOTEBOOK_LM_PRETRAINING", - "VERTEX_SAFE_FLAN", - "GITHUB_MIRROR_V1_0_1", - "GITHUB_MIRROR_V2_1_0", - "CMS_WIKIPEDIA_LANG_FILTERED", - "CMS_STACKOVERFLOW_MULTILINGUAL", - "CMS_STACKEXCHANGE", - "PUBMED", - "GEMINI_DOCJOINS_EN_TOP10B_GCC", - "GEMINI_DOCJOINS_EN_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_EN_TOP20B_TOP100B_GCC", - "GEMINI_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_GCC", - "GEMINI_DOCJOINS_I18N_TOP10B_TOP20B_GCC", - "GEMINI_DOCJOINS_I18N_TOP20B_TOP100B_GCC", - "SIMPLIFIED_HTML_V1_GCC", - "GEMINI_DOCJOINS_TOXICITY_TAGGED_GCC", - "CMS_GITHUB_V4", - "GITHUB_HTML_V4", - "GITHUB_OTHER_V4", - "GITHUB_LONG_TAIL_V4", - "CMS_GITHUB_MULTIFILE_V4", - "GITHUB_DIFFS_WITH_COMMIT_MESSAGE", - "ULM_ARXIV", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_ENONLY", - "NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_040623_LONG_DEDUP_NONENONLY", - "QUORA", - "PODCASTS_ROBOTSTXT", - "COMBINED_REDDIT", - "CANARIES_SHUFFLED", - "CLM_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "TECHDOCS_DATA_SOURCE", - "SCIENCE_PDF_70M_DOCS_FILTERED", - "GEMINI_V1_CMS_WIKIPEDIA_LANG_FILTERED", - "GEMINI_V1_WIKIPEDIA_DIFFS", - "GEMINI_V1_DOCJOINS_EN_TOP10B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP10B_TOP20B_GCC_NODEDUP_050523", - 
"GEMINI_V1_DOCJOINS_EN_TOP20B_TOP100B_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_EN_TOP100B_ALL_INDEXED_GCC_NODEDUP_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP10B_TOP20B_GCC_050523", - "GEMINI_V1_DOCJOINS_I18N_TOP20B_TOP100B_GCC_050523", - "GEMINI_V1_SIMPLIFIED_HTML_V2_GCC", - "GEMINI_V1_CMS_STACKOVERFLOW_MULTILINGUAL_V2", - "GEMINI_V1_CMS_STACKEXCHANGE_DECONT", - "GEMINI_V1_QUORA", - "GEMINI_V1_COMBINED_REDDIT", - "GEMINI_V1_DOCJOIN_100B_EN_TOXICITY_TAGGED_GCC_FIXED_TAGS", - "GEMINI_V1_PUBMED", - "GEMINI_V1_WEB_MATH_V2", - "GEMINI_V1_CMS_GITHUB_V7", - "GEMINI_V1_CMS_GITHUB_DECONTAMINATED_V_7", - "GEMINI_V1_GITHUB_DIFF_WITH_COMMIT_MESSAGE_V2", - "GEMINI_V1_GITHUB_HTML_CSS_XML_V4", - "GEMINI_V1_GITHUB_OTHER_V4", - "GEMINI_V1_GITHUB_LONG_TAIL_V4", - "GEMINI_V1_GITHUB_JUPTYER_NOTEBOOKS_SSTABLE", - "GEMINI_V1_ULM_ARXIV_SSTABLE", - "GEMINI_V1_PODCASTS_ROBOTSTXT", - "GEMINI_V1_SCIENCE_PDF_68M_HQ_DOCS_GCC", - "GEMINI_V1_GITHUB_TECHDOCS_V2", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_EN", - "GEMINI_V1_NONPUB_COPYRIGHT_BOOKS_V2_70_CONF_LONG_DEDUP_NONEN", - "GEMINI_V1_STEM_BOOKS_650K_TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_M3W_V2_FILTERED", - "GEMINI_V1_VQCOCA_1B_MULTIRES_WEBLI_EN_V4_350M_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_SCREENAI_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CULTURE_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_EN_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_CC3M_I18N_PREFIXED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_OCR_NON_EN_PREFIXED_FILTERED_IMAGE2TEXT", - "GEMINI_V1_VQCOCA_1B_MULTIRES_VTP_4F_VIDEO2TEXT_PREFIX", - "GEMINI_V1_FORMAL_MATH_WITHOUT_HOLSTEPS_AND_MIZAR", - "GEMINI_V1_TRANSLATE_DATAV2_ALLTIERS_GCC_MIX", - "GEMINI_V1_CANARIES_SHUFFLED_DOCJOIN_EN_NONEN_CODE_ARXIV_TRANSLATE", - "DUET_CLOUD_SECURITY_DOCS", - "DUET_GITHUB_CODE_SNIPPETS", - 
"DUET_GITHUB_FILES", - "DUET_GOBYEXAMPLE", - "DUET_GOLANG_DOCS", - "DUET_CLOUD_DOCS_TROUBLESHOOTING_TABLES", - "DUET_DEVSITE_DOCS", - "DUET_CLOUD_BLOG_POSTS", - "DUET_CLOUD_PODCAST_EPISODES", - "DUET_YOUTUBE_VIDEOS", - "DUET_CLOUD_SKILLS_BOOST", - "DUET_CLOUD_DOCS", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_GENERATED", - "DUET_CLOUD_GITHUB_CODE_SNIPPETS_HANDWRITTEN", - "DUET_GOOGLESQL_GENERATION", - "DUET_CLOUD_IX_PROMPTS", - "DUET_RAD", - "DUET_STACKOVERFLOW_ISSUES", - "DUET_STACKOVERFLOW_ANSWERS", - "BARD_ARCADE_GITHUB", - "MOBILE_ASSISTANT_MAGI_FILTERED_0825_373K", - "MOBILE_ASSISTANT_PALM24B_FILTERED_400K", - "GENESIS_NEWS_INSIGHTS", - "LABS_AQA_DSCOUT", - "LABS_AQA_TAILWIND", - "LABS_AQA_DELEWARE", - "GEMINI_MULTIMODAL_FT_URL", - "GEMINI_MULTIMODAL_FT_YT", - "GEMINI_MULTIMODAL_FT_SHUTTERSTOCK", - "GEMINI_MULTIMODAL_FT_NONE", - "GEMINI_MULTIMODAL_FT_OTHER", - "GEMINI_MULTIMODAL_FT_INK", - "GEMINI_MULTIMODAL_IT", - "GEMINI_IT_SHUTTERSTOCK", - "GEMINI_IT_M3W", - "GEMINI_IT_HEDGING", - "GEMINI_IT_DSCOUT_FACTUALITY", - "GEMINI_IT_AQUAMUSE", - "GEMINI_IT_SHOTGUN", - "GEMINI_IT_ACI_BENCH", - "GEMINI_IT_SPIDER_FILTERED", - "GEMINI_IT_TAB_SUM_BQ", - "GEMINI_IT_QA_WITH_URL", - "GEMINI_IT_CODE_INSTRUCT", - "GEMINI_IT_MED_PALM", - "GEMINI_IT_TASK_ORIENTED_DIALOG", - "GEMINI_IT_NIMBUS_GROUNDING_TO_PROMPT", - "GEMINI_IT_EITL_GEN", - "GEMINI_IT_HITL_GEN", - "GEMINI_IT_MECH", - "GEMINI_IT_TABLE_GEN", - "GEMINI_IT_NIMBUS_DECIBEL", - "GEMINI_IT_CLOUD_CODE_IF", - "GEMINI_IT_CLOUD_EUR_LEX_JSON", - "GEMINI_IT_CLOUD_OASST", - "GEMINI_IT_CLOUD_SELF_INSTRUCT", - "GEMINI_IT_CLOUD_UCS_AQUAMUSE", - "GEMIT_BRIDGE_SUFFIX_FT", - "CLOUD_SECURITY_PRETRAINING", - "CLOUD_SECURITY_FINETUNING", - "CLOUD_SECURITY_RAG_CISA", - "GEMINI_GOOSE_PUBLIC", - "GEMINI_GOOSE_SILOED", - "GEMINI_V2_CMS_WIKIPEDIA_LANG_FILTERED_GCC_PII", - "GEMINI_V2_WIKIPEDIA_DIFFS_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP10B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP10B_211123_PII_FILTERED", - 
"GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP10B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP20B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP20B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP100B_211123_PII_FILTERED", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_NONARTICLES_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_NONEN_TOP100B_111323_WITHOUT_CJKT_STOP_ARTICLES_COMPLIANT", - "GEMINI_V2_ENGLISH_ARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_ENGLISH_NONARTICLES_TOP500B_211123_PII_FILTERED", - "GEMINI_V2_QUORA_COMPLIANT", - "GEMINI_V2_FORUMS_V2_COMPLIANT", - "GEMINI_V2_CMS_STACKOVERFLOW_MULTILINGUAL_V2_COMPLIANT", - "GEMINI_V2_SIMPLIFIED_HTML_V2_CORRECT_FORMAT_COMPLIANT", - "GEMINI_V2_GEMINI_DOCJOINS_TOXICITY_TAGGED_FIXED_TAGS_COMPLIANT", - "GEMINI_V2_CODEWEB_V1_COMPLIANT", - "GEMINI_V2_LEETCODE_GCC_PII", - "GEMINI_V2_CODE_CONTESTS_COMPLIANT", - "GEMINI_V2_CMS_GITHUB_MULTI_FILE_FOR_FIM_GEMBAGZ_FIXED_BYTES_LENGTHS", - "GEMINI_V2_GITHUB_EVALED_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_NON_EVAL_HIGH_PRI_LANGUAGES_COMPLIANT", - "GEMINI_V2_GITHUB_LOW_PRI_LANGUAGES_AND_CONFIGS_COMPLIANT", - "GEMINI_V2_GITHUB_LONG_TAIL_AND_STRUCTURED_DATA_COMPLIANT", - "GEMINI_V2_GITHUB_PYTHON_NOTEBOOKS_COMPLIANT", - "GEMINI_V2_GITHUB_DIFFS_COMPLIANT", - "GEMINI_V2_GITHUB_TECHDOCS_COMPLIANT", - "GEMINI_V2_HIGH_QUALITY_CODE_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_SCIENCE_PDF_68M_HQ_DOCS_DEDUP_COMPLIANT_CLEAN_TEX", - "GEMINI_V2_ARXIV_2023_COMPLIANT", - "GEMINI_V2_FORMAL_COMPLIANT", - "GEMINI_V2_CMS_STACKEXCHANGE_COMPLIANT", - "GEMINI_V2_PUBMED_COMPLIANT", - "GEMINI_V2_WEB_MATH_V3_COMPLIANT", 
- "GEMINI_V2_SCIENCEWEB_V0_GCC_PII", - "GEMINI_V2_WEB_POLYMATH_V1_COMPLIANT", - "GEMINI_V2_MATH_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_BIOLOGY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_PHYSICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHEMISTRY_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MACHINE_LEARNING_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_QA_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_ECONOMICS_V2_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_MEDICAL_TARGETED_DATA_COMPLIANT2", - "GEMINI_V2_CHESS_COMPLIANT", - "GEMINI_V2_YOUTUBE_SCIENCE_V4_FILTERED_COMPLIANT", - "GEMINI_V2_GOALDMINE_XL_GENERATED_PLUS_GT_NO_DM_MATH_COMPLIANT", - "GEMINI_V2_FIRSTTIMES_SCIENCE_PDF_DEDUP_HQ_LENGTH_FILTERED_COMPLIANT", - "GEMINI_V2_PODCASTS_COMPLIANT", - "GEMINI_V2_EN_NONSCIENCE_PDF_DEDUP_46M_DOCS_COMPLIANT", - "GEMINI_V2_NONPUB_COPYRIGHT_BOOKS_V3_70_CONF_082323_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_COPYRIGHT_BOOKS_V3_111823_LONG_DEDUP_ENONLY_COMPLIANT", - "GEMINI_V2_STEM_BOOKS_318K_TEXT_COMPLIANT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M3W_WITH_IMAGE_TOKENS_INSERTED_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_T2I_TEXT_TO_IMAGE_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_EN_V4_350M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_I18N_680M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_FULL_HTML_75M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SCREENAI_V1_1_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_OCR_DOC_240M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - 
"GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SHUTTERSTOCK_VIDEO_VIDEO_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_M4W_INTERLEAVED_COMPLIANT_PII_FILTERED_SOFT", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CULTURE_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_DETECTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WEBLI_ALT_TEXT_NONEN_500M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_SPATIAL_AWARE_PALI_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_3D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TABLE2MD_V2_NON_EN_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_3D_DOC_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CC3M_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_INFOGRAPHICS_LARGE_WEB_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_BIORXIV_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PLOT2TABLE_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_TIKZ_DERENDERING_MERGED_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_CLOUDAI_TABLE2HTML_2D_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_WIKIPEDIA_EQUATIONS_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_PHOTOMATH_EQ2LATEX_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_CACHED_VQCOCA_MMFT_17T_ARXIV_EQUATIONS_V2_IMAGE_TO_TEXT_COMPLIANT_PII_FILTERED", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_ASR_1P5M_GEMBAGZ_V2_COMPLIANT", - 
"GEMINI_V2_USM2B_MLPV5_YT_ASR_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_SUP_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_TTS_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_PODIOSET_INTERLEAVE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_ENUS_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_SCIENCE_I18N_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_1P5M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_USM2B_MLPV5_YT_INTERLEAVE_HEAD_4M_GEMBAGZ_V2_COMPLIANT", - "GEMINI_V2_CLM_TRANSLATE_DATAV3_WEB_UNWMT_INCR_MIX", - "GEMINI_V2_NTL_NTLV4A_MONOLINGUAL_DEDUP_N5", - "GEMINI_V2_NTL_STT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_TRANSLIT_BILEX_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_BT_TRANSLATE_DEDUP_N5", - "GEMINI_V2_NTL_SYN_FT_FIXED_TRANSLATE_DEDUP_N5", - "GEMINI_V2_CANARIES_SHUFFLED_COMPLIANT", - "CLOUD_GEMIT_CLOUD_FACTUALITY_GROUNDING_MAGI", - "CLOUD_GEMIT_MT_DIALGUE_LMSYS", - "CLOUD_GEMIT_MTS_DIALOGUE_V3", - "CLOUD_GEMIT_COMMIT_MSG_GEN_V3", - "CLOUD_GEMIT_CODE_IF_V1", - "CLOUD_GEMIT_CODE_SELF_REPAIR", - "CLOUD_GEMIT_IDENTITY", - "CLOUD_GEMIT_SEARCH_AUGMENTED_RESPONSE_GENERATION", - "CLOUD_GEMIT_AMPS", - "CLOUD_GEMIT_AQUA", - "CLOUD_GEMIT_COMMON_SENSE_REASONING_SCHEMA", - "CLOUD_GEMIT_GSM8K_SCHEMA", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_UN", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_EUROPARL", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_WMT_NEWSCOMMENTARY", - "GEMINI_V1_TAIL_PATCH_TRANSLATION_2021_INCR", - "GEMINI_V1_TAIL_PATCH_GOALDMINE", - "GEMINI_V1_TAIL_PATCH_PHOTOMATH_IM2SOL_PROBLEM_AND_SOLUTION", - "GEMINI_V1_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V1_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V1_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V1_TAIL_PATCH_BIRD_SQL_LITE", - 
"GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V1_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_CCAI_DIALOG_SUM_HUMAN", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_PUNTING", - "GEMINI_V2_TAIL_PATCH_MATH_REASONING_NON_PUNTING", - "GEMINI_V2_TAIL_PATCH_JSON_TABLE_EXTRACTION", - "GEMINI_V2_TAIL_PATCH_BIRD_SQL_LITE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_ANSWERABLE", - "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", - "GEMINI_V2_TAIL_PATCH_PMC", - "GEMINI_V2_TAIL_PATCH_VOXPOPULI", - "GEMINI_V2_TAIL_PATCH_FLEURS", - "GEMINI_V2_SSFS", - "GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", - "GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", - "GEMINI_V2_SQL_REPAIR_SFT", - "GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", - "YT_CONTENT_INSPIRATION" - ], - "enumDescriptions": [ - "", - "Wikipedia article Tensorflow datasets used by Tarzan and maintained by TFDS team.", - "Webdocs that have been filtered from the docjoins by the Tarzan team for use in the Tarzan training set.", - "", - "", - "'Full view' books dataset maintained by Oceanographers team, meaning 'ok to view the book in full in all localities'. Largely the same as 'public domain', but with potentially subtle distinction.", - "Filtered private books used by ULM: http://google3/learning/multipod/pax/lm/params/ulm/tasks.py;l=123;rcl=494241309. which corresponds with /cns/mf-d/home/multipod-language-data/private_books/books_filtered_en_resharded@50000", - "Google news dataset referenced in: http://google3/learning/brain/research/conversation/meena/t5/pretrain_tasks.py;l=922;rcl=496534668", - "The docjoins data for ULM /cns/yo-d/home/multipod-language-data/docjoins/rs=6.3/20220728/100B_docstructure_split/examples_en.tfrecord_lattice_05_score_01_HFV13@3929", - "", - "Meena full conversations. 
http://google3/learning/brain/research/conversation/meena/t5/pretrain_mixtures.py;l=675;rcl=496583228", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Academic dataset of math text. http://google3/learning/brain/research/conversation/meena/seqio/mixtures/experimental/bard.py;rcl=500222380", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Datasets managed by the Goodall team: deepmind-goodall@google.com", - "", - "", - "", - "", - "", - "", - "", - "Datasets used by Codepoet", - "Datasets used by Vertex", - "", - "", - "Datasets used by Gemini Public data", - "", - "", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "Github", - "", - "", - "", - "", - "", - "Arxiv", - "Others", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemini V1, order by precedence. Wikipedia", - "", - "Public webdocs", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "GitHub dataset with license info. 
We prefer this to help cite proper licenses for code recitation.", - "", - "", - "", - "", - "", - "", - "ArXiv", - "Citable misc", - "", - "", - "Non-public books", - "", - "", - "Other", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Duet AI finetune datasets, order by precedence.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Bard ARCADE finetune dataset", - "Mobile assistant finetune datasets.", - "", - "Genesis fine tuned datasets.", - "LABS AQA fine-tune datasets.", - "", - "", - "Gemini multimodal instruction tune(IT) and fine tune(FT) datasets datasets.", - "", - "", - "", - "", - "", - "", - "Gemini IT 1.2.7 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Gemit Bridge ULM FT dataset", - "Cloud Security fine tuned datasets.", - "", - "", - "Gemini Goose FT datasets.", - "", - "Gemini V2 datasets", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Cloud gemit pro FT datasets.", - "", - "", - "", - "", - "", - "", - "Cloud gemit ultra FT datasets.", - "", - "", - "", - "", - "Gemini V1 tail patch translation.", - "", - "", - "", - "Gemini V1 tail patch others.", - "", - "Gemini V1 and V2 shared tail patch.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"Gemini V2 only tail patch.", - "", - "", - "Gemini V2 rev10", - "", - "", - "", - "", - "", - "Youtube Content Inspiration FT datasets." - ], - "type": "string" - }, - "displayAttributionMessage": { - "description": "human-friendly string that contains information from doc_attribution which could be shown by clients", - "type": "string" - }, - "docAttribution": { - "$ref": "LearningGenaiRecitationDocAttribution" - }, - "docOccurrences": { - "description": "number of documents that contained this segment", - "format": "int32", - "type": "integer" - }, - "endIndex": { - "format": "int32", - "type": "integer" - }, - "rawText": { - "description": "The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options.", - "type": "string" - }, - "segmentRecitationAction": { - "enum": [ - "ACTION_UNSPECIFIED", - "CITE", - "BLOCK", - "NO_ACTION", - "EXEMPT_FOUND_IN_PROMPT" - ], - "enumDescriptions": [ - "", - "indicate that attribution must be shown for a Segment", - "indicate that a Segment should be blocked from being used", - "for tagging high-frequency code snippets", - "The recitation was found in prompt and is exempted from overall results" - ], - "type": "string" - }, - "sourceCategory": { - "description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", - "enum": [ - "SOURCE_CATEGORY_UNSPECIFIED", - "SOURCE_CATEGORY_WIKIPEDIA", - "SOURCE_CATEGORY_WEBDOCS", - "SOURCE_CATEGORY_GITHUB", - "SOURCE_CATEGORY_ARXIV", - "SOURCE_CATEGORY_PRIVATE_BOOKS", - "SOURCE_CATEGORY_OTHERS", - "SOURCE_CATEGORY_PUBLIC_BOOKS", - "SOURCE_CATEGORY_GNEWS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "startIndex": { - "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. 
In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "LearningGenaiRootCalculationType": { - "description": "The type used for final weights calculation.", - "id": "LearningGenaiRootCalculationType", - "properties": { - "scoreType": { - "enum": [ - "TYPE_UNKNOWN", - "TYPE_SAFE", - "TYPE_POLICY", - "TYPE_GENERATION" - ], - "enumDescriptions": [ - "Unknown scorer type.", - "Safety scorer.", - "Policy scorer.", - "Generation scorer." - ], - "type": "string" - }, - "weights": { - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRootClassifierOutput": { - "id": "LearningGenaiRootClassifierOutput", - "properties": { - "ruleOutput": { - "$ref": "LearningGenaiRootRuleOutput", - "deprecated": true, - "description": "If set, this is the output of the first matching rule." - }, - "ruleOutputs": { - "description": "outputs of all matching rule.", - "items": { - "$ref": "LearningGenaiRootRuleOutput" - }, - "type": "array" - }, - "state": { - "$ref": "LearningGenaiRootClassifierState", - "description": "The results of data_providers and metrics." - } - }, - "type": "object" - }, - "LearningGenaiRootClassifierOutputSummary": { - "id": "LearningGenaiRootClassifierOutputSummary", - "properties": { - "metrics": { - "items": { - "$ref": "LearningGenaiRootMetricOutput" - }, - "type": "array" - }, - "ruleOutput": { - "$ref": "LearningGenaiRootRuleOutput", - "deprecated": true, - "description": "Output of the first matching rule." - }, - "ruleOutputs": { - "description": "outputs of all matching rule.", - "items": { - "$ref": "LearningGenaiRootRuleOutput" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootClassifierState": { - "description": "DataProviderOutput and MetricOutput can be saved between calls to the Classifier framework. 
For instance, you can run the query classifier, get outputs from those metrics, then use them in a result classifier as well. Example rule based on this idea: and_rules { rule { metric_name: 'query_safesearch_v2' ... } rule { metric_name: 'response_safesearch_v2' ... } }", - "id": "LearningGenaiRootClassifierState", - "properties": { - "dataProviderOutput": { - "items": { - "$ref": "LearningGenaiRootDataProviderOutput" - }, - "type": "array" - }, - "metricOutput": { - "items": { - "$ref": "LearningGenaiRootMetricOutput" - }, - "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyChatMetadata": { - "description": "Stores all metadata relating to AIDA DoConversation.", - "id": "LearningGenaiRootCodeyChatMetadata", - "properties": { - "codeLanguage": { - "description": "Indicates the programming language of the code if the message is a code chunk.", - "enum": [ - "UNSPECIFIED", - "ALL", - "TEXT", - "CPP", - "PYTHON", - "KOTLIN", - "JAVA", - "JAVASCRIPT", - "GO", - "R", - "JUPYTER_NOTEBOOK", - "TYPESCRIPT", - "HTML", - "SQL", - "BASH", - "C", - "DART", - "GRADLE", - "GROOVY", - "JAVADOC", - "JSON", - "MAKEFILE", - "MARKDOWN", - "PROTO", - "XML", - "YAML" - ], - "enumDescriptions": [ - "Unspecified Language.", - "All languages.", - "Not code.", - "The most common, well-supported languages. C++ code.", - "Python code.", - "Kotlin code.", - "Java code.", - "JavaScript code.", - "Go code.", - "R code.", - "Jupyter notebook.", - "TypeScript code.", - "HTML code.", - "SQL code.", - "Other languages in alphabetical order. BASH code.", - "C code.", - "Dart code.", - "Gradle code.", - "Groovy code.", - "API documentation.", - "JSON code.", - "Makefile code.", - "Markdown code.", - "Protocol buffer.", - "XML code.", - "YAML code." 
- ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyCheckpoint": { - "description": "Describes a sample at a checkpoint for post-processing.", - "id": "LearningGenaiRootCodeyCheckpoint", - "properties": { - "codeyTruncatorMetadata": { - "$ref": "LearningGenaiRootCodeyTruncatorMetadata", - "description": "Metadata that describes what was truncated at this checkpoint." - }, - "currentSample": { - "description": "Current state of the sample after truncator.", - "type": "string" - }, - "postInferenceStep": { - "description": "Postprocessor run that yielded this checkpoint.", - "enum": [ - "STEP_POST_PROCESSING_STEP_UNSPECIFIED", - "STEP_ORIGINAL_MODEL_OUTPUT", - "STEP_MODEL_OUTPUT_DEDUPLICATION", - "STEP_STOP_SEQUENCE_TRUNCATION", - "STEP_HEURISTIC_TRUNCATION", - "STEP_WALD_TRUNCATION", - "STEP_WHITESPACE_TRUNCATION", - "STEP_FINAL_DEDUPLICATION", - "STEP_TOXICITY_CHECK", - "STEP_RECITATION_CHECK", - "STEP_RETURNED", - "STEP_WALKBACK_CORRECTION", - "STEP_SCORE_THRESHOLDING", - "STEP_MODEL_CONFIG_STOP_SEQUENCE_TRUNCATION", - "STEP_CUSTOM_STOP_SEQUENCE_TRUNCATION", - "STEP_EXPECTED_SAMPLE_SIZE", - "STEP_TREE_TRIM_TRUNCATION" - ], - "enumDeprecated": [ - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false - ], - "enumDescriptions": [ - "", - "Original model outputs as-is.", - "Original model outputs after deduplication.", - "StopSequencePostProcessor.", - "Heuristic SuffixTruncator step.", - "Go service post-processor.", - "Truncate trailing whitespace and filter whitespace-only completions.", - "Deduplicate after all truncations.", - "Toxicity returns true.", - "Recitation causes BLOCK.", - "Return the response to the API.", - "Correcting walkback constraint (samples are dropped if they don't match the prefix constraint).", - "Thresholding samples based on a minimum score.", - "StopSequencePostProcessor.", - "StopSequencePostProcessor.", - "Drop 
extra number of samples that exceeds expected_samples.", - "Truncated by highest end token score." - ], - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootCodeyCompletionMetadata": { - "description": "Stores all metadata relating to Completion.", - "id": "LearningGenaiRootCodeyCompletionMetadata", - "properties": { - "checkpoints": { - "items": { - "$ref": "LearningGenaiRootCodeyCheckpoint" - }, - "type": "array" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for FeatureGroup." } }, "type": "object" }, - "LearningGenaiRootCodeyGenerationMetadata": { - "description": "Stores all metadata relating to GenerateCode.", - "id": "LearningGenaiRootCodeyGenerationMetadata", + "GoogleCloudAiplatformV1beta1UpdateFeatureOnlineStoreOperationMetadata": { + "description": "Details of operations that perform update FeatureOnlineStore.", + "id": "GoogleCloudAiplatformV1beta1UpdateFeatureOnlineStoreOperationMetadata", "properties": { - "output": { - "description": "Last state of the sample before getting dropped/returned.", - "type": "string" - }, - "postInferenceStep": { - "description": "Last Codey postprocessing step for this sample before getting dropped/returned.", - "enum": [ - "STEP_POST_PROCESSING_STEP_UNSPECIFIED", - "STEP_ORIGINAL_MODEL_OUTPUT", - "STEP_MODEL_OUTPUT_DEDUPLICATION", - "STEP_STOP_SEQUENCE_TRUNCATION", - "STEP_HEURISTIC_TRUNCATION", - "STEP_WALD_TRUNCATION", - "STEP_WHITESPACE_TRUNCATION", - "STEP_FINAL_DEDUPLICATION", - "STEP_TOXICITY_CHECK", - "STEP_RECITATION_CHECK", - "STEP_RETURNED", - "STEP_WALKBACK_CORRECTION", - "STEP_SCORE_THRESHOLDING", - "STEP_MODEL_CONFIG_STOP_SEQUENCE_TRUNCATION", - "STEP_CUSTOM_STOP_SEQUENCE_TRUNCATION", - "STEP_EXPECTED_SAMPLE_SIZE", - "STEP_TREE_TRIM_TRUNCATION" - ], - "enumDeprecated": [ - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, 
- false - ], - "enumDescriptions": [ - "", - "Original model outputs as-is.", - "Original model outputs after deduplication.", - "StopSequencePostProcessor.", - "Heuristic SuffixTruncator step.", - "Go service post-processor.", - "Truncate trailing whitespace and filter whitespace-only completions.", - "Deduplicate after all truncations.", - "Toxicity returns true.", - "Recitation causes BLOCK.", - "Return the response to the API.", - "Correcting walkback constraint (samples are dropped if they don't match the prefix constraint).", - "Thresholding samples based on a minimum score.", - "StopSequencePostProcessor.", - "StopSequencePostProcessor.", - "Drop extra number of samples that exceeds expected_samples.", - "Truncated by highest end token score." - ], - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for FeatureOnlineStore." } }, "type": "object" }, - "LearningGenaiRootCodeyOutput": { - "description": "Top-level wrapper used to store all things codey-related.", - "id": "LearningGenaiRootCodeyOutput", + "GoogleCloudAiplatformV1beta1UpdateFeatureOperationMetadata": { + "description": "Details of operations that perform update Feature.", + "id": "GoogleCloudAiplatformV1beta1UpdateFeatureOperationMetadata", "properties": { - "codeyChatMetadata": { - "$ref": "LearningGenaiRootCodeyChatMetadata" - }, - "codeyCompletionMetadata": { - "$ref": "LearningGenaiRootCodeyCompletionMetadata" - }, - "codeyGenerationMetadata": { - "$ref": "LearningGenaiRootCodeyGenerationMetadata" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for Feature Update." 
} }, "type": "object" }, - "LearningGenaiRootCodeyTruncatorMetadata": { - "description": "Metadata describing what was truncated at each checkpoint.", - "id": "LearningGenaiRootCodeyTruncatorMetadata", + "GoogleCloudAiplatformV1beta1UpdateFeatureViewOperationMetadata": { + "description": "Details of operations that perform update FeatureView.", + "id": "GoogleCloudAiplatformV1beta1UpdateFeatureViewOperationMetadata", "properties": { - "cutoffIndex": { - "description": "Index of the current sample that trims off truncated text.", - "format": "int32", - "type": "integer" - }, - "truncatedText": { - "description": "Text that was truncated at a specific checkpoint.", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for FeatureView Update." } }, "type": "object" }, - "LearningGenaiRootControlDecodingConfigThreshold": { - "description": "Score threshold for a category.", - "id": "LearningGenaiRootControlDecodingConfigThreshold", + "GoogleCloudAiplatformV1beta1UpdateFeaturestoreOperationMetadata": { + "description": "Details of operations that perform update Featurestore.", + "id": "GoogleCloudAiplatformV1beta1UpdateFeaturestoreOperationMetadata", "properties": { - "policy": { - "enum": [ - "UNSPECIFIED", - "DANGEROUS_CONTENT", - "HARASSMENT", - "HATE_SPEECH", - "SEXUALLY_EXPLICIT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "scoreMax": { - "format": "float", - "type": "number" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for Featurestore." 
} }, "type": "object" }, - "LearningGenaiRootControlDecodingRecord": { - "description": "Holds one control decoding record.", - "id": "LearningGenaiRootControlDecodingRecord", + "GoogleCloudAiplatformV1beta1UpdateIndexOperationMetadata": { + "description": "Runtime operation information for IndexService.UpdateIndex.", + "id": "GoogleCloudAiplatformV1beta1UpdateIndexOperationMetadata", "properties": { - "prefixes": { - "description": "Prefixes feeded into scorer.", - "type": "string" - }, - "scores": { - "description": "Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`.", - "items": { - "$ref": "LearningGenaiRootControlDecodingRecordPolicyScore" - }, - "type": "array" - }, - "suffiexes": { - "description": "Suffixes feeded into scorer.", - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The operation generic information." }, - "thresholds": { - "description": "Per policy thresholds from user config.", - "items": { - "$ref": "LearningGenaiRootControlDecodingConfigThreshold" - }, - "type": "array" + "nearestNeighborSearchOperationMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata", + "description": "The operation metadata with regard to Matching Engine Index operation." 
} }, "type": "object" }, - "LearningGenaiRootControlDecodingRecordPolicyScore": { - "id": "LearningGenaiRootControlDecodingRecordPolicyScore", + "GoogleCloudAiplatformV1beta1UpdateModelDeploymentMonitoringJobOperationMetadata": { + "description": "Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob.", + "id": "GoogleCloudAiplatformV1beta1UpdateModelDeploymentMonitoringJobOperationMetadata", "properties": { - "policy": { - "enum": [ - "UNSPECIFIED", - "DANGEROUS_CONTENT", - "HARASSMENT", - "HATE_SPEECH", - "SEXUALLY_EXPLICIT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "score": { - "format": "float", - "type": "number" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The operation generic information." } }, "type": "object" }, - "LearningGenaiRootControlDecodingRecords": { - "id": "LearningGenaiRootControlDecodingRecords", + "GoogleCloudAiplatformV1beta1UpdateModelMonitorOperationMetadata": { + "description": "Runtime operation information for ModelMonitoringService.UpdateModelMonitor.", + "id": "GoogleCloudAiplatformV1beta1UpdateModelMonitorOperationMetadata", "properties": { - "records": { - "description": "One ControlDecodingRecord record maps to one rewind.", - "items": { - "$ref": "LearningGenaiRootControlDecodingRecord" - }, - "type": "array" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The operation generic information." 
} }, "type": "object" }, - "LearningGenaiRootDataProviderOutput": { - "id": "LearningGenaiRootDataProviderOutput", + "GoogleCloudAiplatformV1beta1UpdatePersistentResourceOperationMetadata": { + "description": "Details of operations that perform update PersistentResource.", + "id": "GoogleCloudAiplatformV1beta1UpdatePersistentResourceOperationMetadata", "properties": { - "name": { - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for PersistentResource." }, - "status": { - "$ref": "UtilStatusProto", - "description": "If set, this DataProvider failed and this is the error message." + "progressMessage": { + "description": "Progress Message for Update LRO", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootFilterMetadata": { - "id": "LearningGenaiRootFilterMetadata", + "GoogleCloudAiplatformV1beta1UpdateSpecialistPoolOperationMetadata": { + "description": "Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool.", + "id": "GoogleCloudAiplatformV1beta1UpdateSpecialistPoolOperationMetadata", "properties": { - "confidence": { - "description": "Filter confidence.", - "enum": [ - "FILTER_CONFIDENCE_UNKNOWN", - "FILTER_CONFIDENCE_VERY_LOW", - "FILTER_CONFIDENCE_LOW", - "FILTER_CONFIDENCE_MEDIUM", - "FILTER_CONFIDENCE_HIGH", - "FILTER_CONFIDENCE_VERY_HIGH" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "debugInfo": { - "$ref": "LearningGenaiRootFilterMetadataFilterDebugInfo", - "description": "Debug info for the message." 
- }, - "fallback": { - "description": "A fallback message chosen by the applied filter.", - "type": "string" - }, - "info": { - "description": "Additional info for the filter.", - "type": "string" - }, - "name": { - "description": "Name of the filter that triggered.", - "type": "string" - }, - "reason": { - "description": "Filter reason.", - "enum": [ - "FILTER_REASON_UNKNOWN", - "FILTER_REASON_NOT_FILTERED", - "FILTER_REASON_SENSITIVE", - "FILTER_REASON_RECITATION", - "FILTER_REASON_LANGUAGE", - "FILTER_REASON_TAKEDOWN", - "FILTER_REASON_CLASSIFIER", - "FILTER_REASON_EMPTY_RESPONSE", - "FILTER_REASON_SIMILARITY_TAKEDOWN", - "FILTER_REASON_UNSAFE", - "FILTER_REASON_PAIRWISE_CLASSIFIER", - "FILTER_REASON_CODEY", - "FILTER_REASON_URL", - "FILTER_REASON_EMAIL", - "FILTER_REASON_SAFETY_CAT", - "FILTER_REASON_REQUEST_RESPONSE_TAKEDOWN", - "FILTER_REASON_RAI_PQC", - "FILTER_REASON_ATLAS", - "FILTER_REASON_RAI_CSAM", - "FILTER_REASON_RAI_FRINGE", - "FILTER_REASON_RAI_SPII", - "FILTER_REASON_RAI_IMAGE_VIOLENCE", - "FILTER_REASON_RAI_IMAGE_PORN", - "FILTER_REASON_RAI_IMAGE_CSAM", - "FILTER_REASON_RAI_IMAGE_PEDO", - "FILTER_REASON_RAI_IMAGE_CHILD", - "FILTER_REASON_RAI_VIDEO_FRAME_VIOLENCE", - "FILTER_REASON_RAI_VIDEO_FRAME_PORN", - "FILTER_REASON_RAI_VIDEO_FRAME_CSAM", - "FILTER_REASON_RAI_VIDEO_FRAME_PEDO", - "FILTER_REASON_RAI_VIDEO_FRAME_CHILD", - "FILTER_REASON_RAI_CONTEXTUAL_DANGEROUS", - "FILTER_REASON_RAI_GRAIL_TEXT", - "FILTER_REASON_RAI_GRAIL_IMAGE", - "FILTER_REASON_RAI_SAFETYCAT", - "FILTER_REASON_TOXICITY", - "FILTER_REASON_ATLAS_PRICING", - "FILTER_REASON_ATLAS_BILLING", - "FILTER_REASON_ATLAS_NON_ENGLISH_QUESTION", - "FILTER_REASON_ATLAS_NOT_RELATED_TO_GCP", - "FILTER_REASON_ATLAS_AWS_AZURE_RELATED", - "FILTER_REASON_XAI", - "FILTER_CONTROL_DECODING" - ], - "enumDescriptions": [ - "Unknown filter reason.", - "Input not filtered.", - "Sensitive content.", - "Recited content.", - "Language filtering", - "Takedown policy", - "Classifier Module", - "Empty response 
message.", - "Similarity takedown.", - "Unsafe responses from scorers.", - "Pairwise classifier.", - "Codey Filter.", - "URLs Filter.", - "Emails Filter.", - "SafetyCat filter.", - "Request Response takedown.", - "RAI Filter.", - "Atlas specific topic filter", - "RAI Filter.", - "RAI Filter.", - "RAI Filter.", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "RAI Filter", - "Grail Text", - "Grail Image", - "SafetyCat.", - "Toxic content.", - "Atlas specific topic filter for pricing questions.", - "Atlas specific topic filter for billing questions.", - "Atlas specific topic filter for non english questions.", - "Atlas specific topic filter for non GCP questions.", - "Atlas specific topic filter aws/azure related questions.", - "Right now we don't do any filtering for XAI. Adding this just want to differentiatiat the XAI output metadata from other SafetyCat RAI output metadata", - "The response are filtered because it could not pass the control decoding thresholds and the maximum rewind attempts is reached." - ], - "type": "string" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The operation generic information." }, - "text": { - "description": "The input query or generated response that is getting filtered.", + "specialistPool": { + "description": "Output only. The name of the SpecialistPool to which the specialists are being added. 
Format: `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`", + "readOnly": true, "type": "string" } }, "type": "object" }, - "LearningGenaiRootFilterMetadataFilterDebugInfo": { - "id": "LearningGenaiRootFilterMetadataFilterDebugInfo", + "GoogleCloudAiplatformV1beta1UpdateTensorboardOperationMetadata": { + "description": "Details of operations that perform update Tensorboard.", + "id": "GoogleCloudAiplatformV1beta1UpdateTensorboardOperationMetadata", "properties": { - "classifierOutput": { - "$ref": "LearningGenaiRootClassifierOutput" - }, - "defaultMetadata": { - "type": "string" - }, - "languageFilterResult": { - "$ref": "LearningGenaiRootLanguageFilterResult" - }, - "raiOutput": { - "$ref": "LearningGenaiRootRAIOutput", - "description": "Safety filter output information for LLM Root RAI harm check." - }, - "raiResult": { - "$ref": "CloudAiNlLlmProtoServiceRaiResult" - }, - "raiSignal": { - "$ref": "CloudAiNlLlmProtoServiceRaiSignal", - "deprecated": true - }, - "records": { - "$ref": "LearningGenaiRootControlDecodingRecords", - "description": "Number of rewinds by controlled decoding." - }, - "streamRecitationResult": { - "$ref": "LanguageLabsAidaTrustRecitationProtoStreamRecitationResult", - "deprecated": true - }, - "takedownResult": { - "$ref": "LearningGenaiRootTakedownResult" - }, - "toxicityResult": { - "$ref": "LearningGenaiRootToxicityResult" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "Operation metadata for Tensorboard." 
} }, "type": "object" }, - "LearningGenaiRootGroundingMetadata": { - "id": "LearningGenaiRootGroundingMetadata", + "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeOperationMetadata": { + "description": "Metadata information for NotebookService.UpgradeNotebookRuntime.", + "id": "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeOperationMetadata", "properties": { - "citations": { - "items": { - "$ref": "LearningGenaiRootGroundingMetadataCitation" - }, - "type": "array" - }, - "groundingCancelled": { - "description": "True if grounding is cancelled, for example, no facts being retrieved.", - "type": "boolean" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The operation generic information." }, - "searchQueries": { - "items": { - "type": "string" - }, - "type": "array" + "progressMessage": { + "description": "A human-readable message that shows the intermediate progress details of NotebookRuntime.", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootGroundingMetadataCitation": { - "id": "LearningGenaiRootGroundingMetadataCitation", + "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeRequest": { + "description": "Request message for NotebookService.UpgradeNotebookRuntime.", + "id": "GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeRequest", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1UploadModelOperationMetadata": { + "description": "Details of ModelService.UploadModel operation.", + "id": "GoogleCloudAiplatformV1beta1UploadModelOperationMetadata", "properties": { - "endIndex": { - "description": "Index in the prediction output where the citation ends (exclusive). Must be > start_index and <= len(output).", - "format": "int32", - "type": "integer" - }, - "factIndex": { - "description": "Index of the fact supporting this claim. 
Should be within the range of the `world_facts` in the GenerateResponse.", - "format": "int32", - "type": "integer" - }, - "score": { - "description": "Confidence score of this entailment. Value is [0,1] with 1 is the most confidence.", - "format": "double", - "type": "number" - }, - "startIndex": { - "description": "Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index.", - "format": "int32", - "type": "integer" + "genericMetadata": { + "$ref": "GoogleCloudAiplatformV1beta1GenericOperationMetadata", + "description": "The common part of the operation metadata." } }, "type": "object" }, - "LearningGenaiRootHarm": { - "id": "LearningGenaiRootHarm", + "GoogleCloudAiplatformV1beta1UploadModelRequest": { + "description": "Request message for ModelService.UploadModel.", + "id": "GoogleCloudAiplatformV1beta1UploadModelRequest", "properties": { - "contextualDangerous": { - "description": "Please do not use, this is still under development.", - "type": "boolean" - }, - "csam": { - "type": "boolean" - }, - "fringe": { - "type": "boolean" - }, - "grailImageHarmType": { - "$ref": "LearningGenaiRootHarmGrailImageHarmType" - }, - "grailTextHarmType": { - "$ref": "LearningGenaiRootHarmGrailTextHarmType" - }, - "imageChild": { - "type": "boolean" - }, - "imageCsam": { - "type": "boolean" - }, - "imagePedo": { - "type": "boolean" - }, - "imagePorn": { - "description": "Image signals", - "type": "boolean" - }, - "imageViolence": { - "type": "boolean" - }, - "pqc": { - "type": "boolean" - }, - "safetycat": { - "$ref": "LearningGenaiRootHarmSafetyCatCategories" - }, - "spii": { - "$ref": "LearningGenaiRootHarmSpiiFilter", - "description": "Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for \"POSSIBLE\" is 3 / 5 = 0.6 ." 
- }, - "threshold": { - "format": "double", - "type": "number" - }, - "videoFrameChild": { - "type": "boolean" - }, - "videoFrameCsam": { - "type": "boolean" + "model": { + "$ref": "GoogleCloudAiplatformV1beta1Model", + "description": "Required. The Model to create." }, - "videoFramePedo": { - "type": "boolean" + "modelId": { + "description": "Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen.", + "type": "string" }, - "videoFramePorn": { - "description": "Video frame signals", - "type": "boolean" + "parentModel": { + "description": "Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version.", + "type": "string" }, - "videoFrameViolence": { - "type": "boolean" + "serviceAccount": { + "description": "Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. 
This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.).", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootHarmGrailImageHarmType": { - "description": "Harm type for images", - "id": "LearningGenaiRootHarmGrailImageHarmType", + "GoogleCloudAiplatformV1beta1UploadModelResponse": { + "description": "Response message of ModelService.UploadModel operation.", + "id": "GoogleCloudAiplatformV1beta1UploadModelResponse", "properties": { - "imageHarmType": { - "items": { - "enum": [ - "IMAGE_HARM_TYPE_UNSPECIFIED", - "IMAGE_HARM_TYPE_PORN", - "IMAGE_HARM_TYPE_VIOLENCE", - "IMAGE_HARM_TYPE_CSAI", - "IMAGE_HARM_TYPE_PEDO", - "IMAGE_HARM_TYPE_MINORS", - "IMAGE_HARM_TYPE_DANGEROUS", - "IMAGE_HARM_TYPE_MEDICAL", - "IMAGE_HARM_TYPE_RACY", - "IMAGE_HARM_TYPE_OBSCENE", - "IMAGE_HARM_TYPE_MINOR_PRESENCE", - "IMAGE_HARM_TYPE_GENERATIVE_MINOR_PRESENCE", - "IMAGE_HARM_TYPE_GENERATIVE_REALISTIC_VISIBLE_FACE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "type": "array" + "model": { + "description": "The name of the uploaded Model resource. Format: `projects/{project}/locations/{location}/models/{model}`", + "type": "string" + }, + "modelVersionId": { + "description": "Output only. 
The version ID of the model that is uploaded.", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "LearningGenaiRootHarmGrailTextHarmType": { - "description": "Harm type for text", - "id": "LearningGenaiRootHarmGrailTextHarmType", + "GoogleCloudAiplatformV1beta1UploadRagFileConfig": { + "description": "Config for uploading RagFile.", + "id": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", "properties": { - "harmType": { - "items": { - "enum": [ - "HARM_TYPE_UNSPECIFIED", - "HARM_TYPE_HATE", - "HARM_TYPE_TOXICITY", - "HARM_TYPE_VIOLENCE", - "HARM_TYPE_CSAI", - "HARM_TYPE_SEXUAL", - "HARM_TYPE_FRINGE", - "HARM_TYPE_POLITICAL", - "HARM_TYPE_MEMORIZATION", - "HARM_TYPE_SPII", - "HARM_TYPE_NEW_DANGEROUS", - "HARM_TYPE_MEDICAL", - "HARM_TYPE_HARASSMENT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "New definition of dangerous.", - "", - "" - ], - "type": "string" - }, - "type": "array" + "ragFileChunkingConfig": { + "$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", + "description": "Specifies the size and overlap of chunks after uploading RagFile." 
} }, "type": "object" }, - "LearningGenaiRootHarmSafetyCatCategories": { - "description": "LINT.ThenChange(//depot/google3/learning/genai/root/util/classifier/backends/grail/grail.cc)", - "id": "LearningGenaiRootHarmSafetyCatCategories", + "GoogleCloudAiplatformV1beta1UploadRagFileRequest": { + "description": "Request message for VertexRagDataService.UploadRagFile.", + "id": "GoogleCloudAiplatformV1beta1UploadRagFileRequest", "properties": { - "categories": { - "items": { - "enum": [ - "SAFETYCAT_CATEGORY_UNSPECIFIED", - "TOXICITY", - "OBSCENE", - "SEXUAL", - "INSULT", - "IDENTITY_HATE", - "DEATH_HARM_TRAGEDY", - "VIOLENCE_ABUSE", - "FIREARMS_WEAPONS", - "PUBLIC_SAFETY", - "HEALTH", - "RELIGION_BELIEF", - "DRUGS", - "WAR_CONFLICT", - "POLITICS", - "FINANCE", - "LEGAL", - "DANGEROUS", - "DANGEROUS_SEVERITY", - "HARASSMENT_SEVERITY", - "HATE_SEVERITY", - "SEXUAL_SEVERITY" - ], - "enumDescriptions": [ - "", - "SafetyCat categories.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Following categories are only supported in SAFETY_CAT_TEXT_V3_PAX model", - "", - "", - "", - "" - ], - "type": "string" - }, - "type": "array" + "ragFile": { + "$ref": "GoogleCloudAiplatformV1beta1RagFile", + "description": "Required. The RagFile to upload." + }, + "uploadRagFileConfig": { + "$ref": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", + "description": "Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile." 
} }, "type": "object" }, - "LearningGenaiRootHarmSpiiFilter": { - "description": "LINT.IfChange", - "id": "LearningGenaiRootHarmSpiiFilter", + "GoogleCloudAiplatformV1beta1UploadRagFileResponse": { + "description": "Response message for VertexRagDataService.UploadRagFile.", + "id": "GoogleCloudAiplatformV1beta1UploadRagFileResponse", "properties": { - "usBankRoutingMicr": { - "type": "boolean" - }, - "usEmployerIdentificationNumber": { - "type": "boolean" + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error that occurred while processing the RagFile." }, - "usSocialSecurityNumber": { - "type": "boolean" + "ragFile": { + "$ref": "GoogleCloudAiplatformV1beta1RagFile", + "description": "The RagFile that had been uploaded into the RagCorpus." } }, "type": "object" }, - "LearningGenaiRootInternalMetadata": { - "id": "LearningGenaiRootInternalMetadata", + "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest": { + "description": "Request message for IndexService.UpsertDatapoints", + "id": "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest", "properties": { - "scoredTokens": { + "datapoints": { + "description": "A list of datapoints to be created/updated.", "items": { - "$ref": "LearningGenaiRootScoredToken" + "$ref": "GoogleCloudAiplatformV1beta1IndexDatapoint" }, "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootLanguageFilterResult": { - "id": "LearningGenaiRootLanguageFilterResult", - "properties": { - "allowed": { - "description": "False when query or response should be filtered out due to unsupported language.", - "type": "boolean" }, - "detectedLanguage": { - "description": "Language of the query or response.", + "updateMask": { + "description": "Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. 
Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts.", + "format": "google-fieldmask", "type": "string" - }, - "detectedLanguageProbability": { - "description": "Probability of the language predicted as returned by LangID.", - "format": "float", - "type": "number" } }, "type": "object" }, - "LearningGenaiRootMetricOutput": { - "id": "LearningGenaiRootMetricOutput", + "GoogleCloudAiplatformV1beta1UpsertDatapointsResponse": { + "description": "Response message for IndexService.UpsertDatapoints", + "id": "GoogleCloudAiplatformV1beta1UpsertDatapointsResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1UserActionReference": { + "description": "References an API call. It contains more information about long running operation and Jobs that are triggered by the API call.", + "id": "GoogleCloudAiplatformV1beta1UserActionReference", "properties": { - "debug": { + "dataLabelingJob": { + "description": "For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`", "type": "string" }, - "name": { - "description": "Name of the metric.", + "method": { + "description": "The method name of the API RPC call. For example, \"/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset\"", "type": "string" }, - "numericValue": { - "format": "double", - "type": "number" - }, - "status": { - "$ref": "UtilStatusProto" - }, - "stringValue": { + "operation": { + "description": "For API calls that return a long running operation. Resource name of the long running operation. 
Format: `projects/{project}/locations/{location}/operations/{operation}`", "type": "string" } }, "type": "object" }, - "LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata": { - "id": "LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata", + "GoogleCloudAiplatformV1beta1Value": { + "description": "Value is the value of the field.", + "id": "GoogleCloudAiplatformV1beta1Value", "properties": { - "factRetrievalMillisecondsByProvider": { - "additionalProperties": { - "format": "int64", - "type": "string" - }, - "description": "Latency spent on fact retrievals. There might be multiple retrievals from different fact providers.", - "type": "object" - }, - "prompt2queryMilliseconds": { - "description": "Latency spent on prompt2query. The procedure generates a search-friendly query given the original prompt.", - "format": "int64", - "type": "string" + "doubleValue": { + "description": "A double value.", + "format": "double", + "type": "number" }, - "retrievalAugmentMilliseconds": { - "description": "Latency if use GroundedGeneration service for the whole retrieval & augmentation.", + "intValue": { + "description": "An integer value.", "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootRAIOutput": { - "description": "This is per harm.", - "id": "LearningGenaiRootRAIOutput", - "properties": { - "allowed": { - "type": "boolean" }, - "harm": { - "$ref": "LearningGenaiRootHarm" - }, - "name": { + "stringValue": { + "description": "A string value.", "type": "string" - }, - "score": { - "format": "double", - "type": "number" } }, "type": "object" }, - "LearningGenaiRootRegexTakedownResult": { - "id": "LearningGenaiRootRegexTakedownResult", + "GoogleCloudAiplatformV1beta1VertexAISearch": { + "description": "Retrieve from Vertex AI Search datastore for grounding. 
See https://cloud.google.com/vertex-ai-search-and-conversation", + "id": "GoogleCloudAiplatformV1beta1VertexAISearch", "properties": { - "allowed": { - "description": "False when query or response should be taken down due to match with a blocked regex, true otherwise.", - "type": "boolean" - }, - "takedownRegex": { - "description": "Regex used to decide that query or response should be taken down. Empty when query or response is kept.", + "datastore": { + "description": "Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", "type": "string" } }, "type": "object" }, - "LearningGenaiRootRequestMetrics": { - "id": "LearningGenaiRootRequestMetrics", + "GoogleCloudAiplatformV1beta1VertexRagStore": { + "description": "Retrieve from Vertex RAG Store for grounding.", + "id": "GoogleCloudAiplatformV1beta1VertexRagStore", "properties": { - "audioMetrics": { - "$ref": "LearningGenaiRootRequestMetricsAudioMetrics", - "description": "Metrics for audio samples in the request." - }, - "imageMetrics": { - "$ref": "LearningGenaiRootRequestMetricsImageMetrics", - "description": "Metrics for image samples in the request." + "ragCorpora": { + "deprecated": true, + "description": "Optional. Deprecated. Please use rag_resources instead.", + "items": { + "type": "string" + }, + "type": "array" }, - "textTokenCount": { - "description": "Number of text tokens extracted from the request.", - "format": "int32", - "type": "integer" + "ragResources": { + "description": "Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. 
In the future we may open up multiple corpora support.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource" + }, + "type": "array" }, - "totalTokenCount": { - "description": "Total number of tokens in the request.", + "similarityTopK": { + "description": "Optional. Number of top k results to return from the selected corpora.", "format": "int32", "type": "integer" }, - "videoMetrics": { - "$ref": "LearningGenaiRootRequestMetricsVideoMetrics", - "description": "Metrics for video samples in the request." + "vectorDistanceThreshold": { + "description": "Optional. Only return results with vector distance smaller than the threshold.", + "format": "double", + "type": "number" } }, "type": "object" }, - "LearningGenaiRootRequestMetricsAudioMetrics": { - "id": "LearningGenaiRootRequestMetricsAudioMetrics", + "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource": { + "description": "The definition of the Rag resource.", + "id": "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource", "properties": { - "audioDuration": { - "description": "Duration of the audio sample in seconds.", - "format": "google-duration", + "ragCorpus": { + "description": "Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", "type": "string" }, - "audioTokenCount": { - "description": "Number of tokens derived directly from audio data.", - "format": "int32", - "type": "integer" - }, - "numAudioFrames": { - "description": "Number of audio frames in the audio.", - "format": "int32", - "type": "integer" + "ragFileIds": { + "description": "Optional. rag_file_id. 
The files should be in the same rag_corpus set in rag_corpus field.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "LearningGenaiRootRequestMetricsImageMetrics": { - "id": "LearningGenaiRootRequestMetricsImageMetrics", + "GoogleCloudAiplatformV1beta1VideoMetadata": { + "description": "Metadata describes the input video content.", + "id": "GoogleCloudAiplatformV1beta1VideoMetadata", "properties": { - "imageTokenCount": { - "description": "Number of tokens extracted from image bytes.", - "format": "int32", - "type": "integer" + "endOffset": { + "description": "Optional. The end offset of the video.", + "format": "google-duration", + "type": "string" }, - "numImages": { - "description": "Number of images in the request.", - "format": "int32", - "type": "integer" + "startOffset": { + "description": "Optional. The start offset of the video.", + "format": "google-duration", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootRequestMetricsVideoMetrics": { - "id": "LearningGenaiRootRequestMetricsVideoMetrics", + "GoogleCloudAiplatformV1beta1WorkerPoolSpec": { + "description": "Represents the spec of a worker pool in a job.", + "id": "GoogleCloudAiplatformV1beta1WorkerPoolSpec", "properties": { - "audioSample": { - "$ref": "LearningGenaiRootRequestMetricsAudioMetrics", - "description": "Metrics associated with audio sample in the video." + "containerSpec": { + "$ref": "GoogleCloudAiplatformV1beta1ContainerSpec", + "description": "The custom container task." }, - "numVideoFrames": { - "description": "Number of video frames in the video.", - "format": "int32", - "type": "integer" + "diskSpec": { + "$ref": "GoogleCloudAiplatformV1beta1DiskSpec", + "description": "Disk spec." }, - "videoDuration": { - "description": "Duration of the video sample in seconds.", - "format": "google-duration", - "type": "string" + "machineSpec": { + "$ref": "GoogleCloudAiplatformV1beta1MachineSpec", + "description": "Optional. Immutable. 
The specification of a single machine." }, - "videoFramesTokenCount": { - "description": "Number of tokens extracted from video frames.", - "format": "int32", - "type": "integer" + "nfsMounts": { + "description": "Optional. List of NFS mount spec.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1NfsMount" + }, + "type": "array" + }, + "pythonPackageSpec": { + "$ref": "GoogleCloudAiplatformV1beta1PythonPackageSpec", + "description": "The Python packaged task." + }, + "replicaCount": { + "description": "Optional. The number of worker replicas to use for this worker pool.", + "format": "int64", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootRequestResponseTakedownResult": { - "id": "LearningGenaiRootRequestResponseTakedownResult", + "GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload": { + "description": "Contains Feature values to be written for a specific entity.", + "id": "GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload", "properties": { - "allowed": { - "description": "False when response has to be taken down per above config.", - "type": "boolean" - }, - "requestTakedownRegex": { - "description": "Regex used to match the request.", + "entityId": { + "description": "Required. The ID of the entity.", "type": "string" }, - "responseTakedownRegex": { - "description": "Regex used to decide that response should be taken down. Empty when response is kept.", - "type": "string" + "featureValues": { + "additionalProperties": { + "$ref": "GoogleCloudAiplatformV1beta1FeatureValue" + }, + "description": "Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. 
The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future.", + "type": "object" } }, "type": "object" }, - "LearningGenaiRootRoutingDecision": { - "description": "Holds the final routing decision, by storing the model_config_id. And individual scores each model got.", - "id": "LearningGenaiRootRoutingDecision", + "GoogleCloudAiplatformV1beta1WriteFeatureValuesRequest": { + "description": "Request message for FeaturestoreOnlineServingService.WriteFeatureValues.", + "id": "GoogleCloudAiplatformV1beta1WriteFeatureValuesRequest", "properties": { - "metadata": { - "$ref": "LearningGenaiRootRoutingDecisionMetadata" - }, - "modelConfigId": { - "description": "The selected model to route traffic to.", - "type": "string" + "payloads": { + "description": "Required. The entities to be written. Up to 100,000 feature values can be written across all `payloads`.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload" + }, + "type": "array" } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadata": { - "description": "Debug metadata about the routing decision.", - "id": "LearningGenaiRootRoutingDecisionMetadata", - "properties": { - "scoreBasedRoutingMetadata": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataScoreBased" - }, - "tokenLengthBasedRoutingMetadata": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBased" - } - }, + "GoogleCloudAiplatformV1beta1WriteFeatureValuesResponse": { + "description": "Response message for FeaturestoreOnlineServingService.WriteFeatureValues.", + "id": "GoogleCloudAiplatformV1beta1WriteFeatureValuesResponse", + "properties": {}, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataScoreBased": { - "description": "If we are routing using scored based configuration, then the metadata about that is available in this proto.", - "id": "LearningGenaiRootRoutingDecisionMetadataScoreBased", + 
"GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataRequest": { + "description": "Request message for TensorboardService.WriteTensorboardExperimentData.", + "id": "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataRequest", "properties": { - "matchedRule": { - "$ref": "LearningGenaiRootScoreBasedRoutingConfigRule", - "description": "The rule that was matched." - }, - "score": { - "$ref": "LearningGenaiRootScore", - "description": "The score that was generated by the router i.e. the model." - }, - "usedDefaultFallback": { - "description": "No rules were matched & therefore used the default fallback.", - "type": "boolean" + "writeRunDataRequests": { + "description": "Required. Requests containing per-run TensorboardTimeSeries data to write.", + "items": { + "$ref": "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest" + }, + "type": "array" } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataTokenLengthBased": { - "id": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBased", + "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataResponse": { + "description": "Response message for TensorboardService.WriteTensorboardExperimentData.", + "id": "GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataResponse", + "properties": {}, + "type": "object" + }, + "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest": { + "description": "Request message for TensorboardService.WriteTensorboardRunData.", + "id": "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest", "properties": { - "modelInputTokenMetadata": { - "items": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata" - }, - "type": "array" + "tensorboardRun": { + "description": "Required. The resource name of the TensorboardRun to write data to. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`", + "type": "string" }, - "modelMaxTokenMetadata": { + "timeSeriesData": { + "description": "Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. The upper limit of data points per write request is 5000.", "items": { - "$ref": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata" + "$ref": "GoogleCloudAiplatformV1beta1TimeSeriesData" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata": { - "id": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata", - "properties": { - "computedInputTokenLength": { - "description": "The length computed by backends using the formatter & tokenizer specific to the model", - "format": "int32", - "type": "integer" - }, - "modelId": { - "type": "string" - }, - "pickedAsFallback": { - "description": "If true, the model was selected as a fallback, since no model met requirements.", - "type": "boolean" - }, - "selected": { - "description": "If true, the model was selected since it met the requriements.", - "type": "boolean" - } - }, + "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataResponse": { + "description": "Response message for TensorboardService.WriteTensorboardRunData.", + "id": "GoogleCloudAiplatformV1beta1WriteTensorboardRunDataResponse", + "properties": {}, "type": "object" }, - "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata": { - "id": "LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata", + "GoogleCloudAiplatformV1beta1XraiAttribution": { + "description": "An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully 
differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models.", + "id": "GoogleCloudAiplatformV1beta1XraiAttribution", "properties": { - "maxNumInputTokens": { - "format": "int32", - "type": "integer" + "blurBaselineConfig": { + "$ref": "GoogleCloudAiplatformV1beta1BlurBaselineConfig", + "description": "Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383" + }, + "smoothGradConfig": { + "$ref": "GoogleCloudAiplatformV1beta1SmoothGradConfig", + "description": "Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf" }, - "maxNumOutputTokens": { + "stepCount": { + "description": "Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively.", "format": "int32", "type": "integer" - }, - "modelId": { - "type": "string" } }, "type": "object" }, - "LearningGenaiRootRuleOutput": { - "id": "LearningGenaiRootRuleOutput", + "GoogleCloudLocationListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "id": "GoogleCloudLocationListLocationsResponse", "properties": { - "decision": { - "enum": [ - "NO_MATCH", - "MATCH" - ], - "enumDescriptions": [ - "This rule was not matched. When used in a ClassifierOutput, this means that no rules were matched.", - "This is a generic \"match\" message, indicating that a rule was triggered. 
Usually you would use this for a categorization classifier." - ], - "type": "string" + "locations": { + "description": "A list of locations that matches the specified filter in the request.", + "items": { + "$ref": "GoogleCloudLocationLocation" + }, + "type": "array" }, - "name": { + "nextPageToken": { + "description": "The standard List next-page token.", "type": "string" } }, "type": "object" }, - "LearningGenaiRootScore": { - "id": "LearningGenaiRootScore", + "GoogleCloudLocationLocation": { + "description": "A resource that represents a Google Cloud location.", + "id": "GoogleCloudLocationLocation", "properties": { - "calculationType": { - "$ref": "LearningGenaiRootCalculationType" + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" }, - "internalMetadata": { - "$ref": "LearningGenaiRootInternalMetadata", - "description": "The internal_metadata is intended to be used by internal processors and will be cleared before returns." + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", + "type": "object" }, - "thresholdType": { - "$ref": "LearningGenaiRootThresholdType" + "locationId": { + "description": "The canonical id for this location. For example: `\"us-east1\"`.", + "type": "string" }, - "tokensAndLogprobPerDecodingStep": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStep", - "description": "Top candidate tokens and log probabilities at each decoding step." + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. 
For example the available capacity at the given location.", + "type": "object" }, - "value": { - "format": "double", - "type": "number" + "name": { + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", + "type": "string" } }, "type": "object" }, - "LearningGenaiRootScoreBasedRoutingConfigRule": { - "id": "LearningGenaiRootScoreBasedRoutingConfigRule", + "GoogleIamV1Binding": { + "description": "Associates `members`, or principals, with a `role`.", + "id": "GoogleIamV1Binding", "properties": { - "equalOrGreaterThan": { - "$ref": "LearningGenaiRootScore", - "description": "NOTE: Hardest examples have smaller values in their routing scores." + "condition": { + "$ref": "GoogleTypeExpr", + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, - "lessThan": { - "$ref": "LearningGenaiRootScore" + "members": { + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . 
* `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. 
* `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.", + "items": { + "type": "string" + }, + "type": "array" }, - "modelConfigId": { - "description": "This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig.", + "role": { + "description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. 
For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).", "type": "string" } }, "type": "object" }, - "LearningGenaiRootScoredSimilarityTakedownPhrase": { - "description": "Proto containing the results from the Universal Sentence Encoder / Other models", - "id": "LearningGenaiRootScoredSimilarityTakedownPhrase", - "properties": { - "phrase": { - "$ref": "LearningGenaiRootSimilarityTakedownPhrase" - }, - "similarityScore": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "LearningGenaiRootScoredToken": { - "description": "A token with its own score.", - "id": "LearningGenaiRootScoredToken", + "GoogleIamV1GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "id": "GoogleIamV1GetIamPolicyRequest", "properties": { - "endTokenScore": { - "description": "Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459", - "format": "float", - "type": "number" - }, - "score": { - "description": "Each score is the logprob for the token in model response.", - "format": "float", - "type": "number" - }, - "token": { - "type": "string" + "options": { + "$ref": "GoogleIamV1GetPolicyOptions", + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" }, - "LearningGenaiRootSimilarityTakedownPhrase": { - "description": "Each SimilarityTakedownPhrase treats a logical group of blocked and allowed phrases together along with a corresponding punt If the closest matching response is of the allowed type, we allow the response If the closest matching response is of the blocked type, we block the response. 
eg: Blocked phrase - \"All lives matter\"", - "id": "LearningGenaiRootSimilarityTakedownPhrase", + "GoogleIamV1GetPolicyOptions": { + "description": "Encapsulates settings provided to GetIamPolicy.", + "id": "GoogleIamV1GetPolicyOptions", "properties": { - "blockedPhrase": { - "type": "string" + "requestedPolicyVersion": { + "description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "LearningGenaiRootSimilarityTakedownResult": { - "id": "LearningGenaiRootSimilarityTakedownResult", + "GoogleIamV1Policy": { + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. 
A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", + "id": "GoogleIamV1Policy", "properties": { - "allowed": { - "description": "False when query or response should be taken down by any of the takedown rules, true otherwise.", - "type": "boolean" - }, - "scoredPhrases": { - "description": "List of similar phrases with score. Set only if allowed=false.", + "bindings": { + "description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. 
The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.", "items": { - "$ref": "LearningGenaiRootScoredSimilarityTakedownPhrase" + "$ref": "GoogleIamV1Binding" }, "type": "array" - } - }, - "type": "object" - }, - "LearningGenaiRootTakedownResult": { - "id": "LearningGenaiRootTakedownResult", - "properties": { - "allowed": { - "description": "False when query or response should be taken down by any of the takedown rules, true otherwise.", - "type": "boolean" - }, - "regexTakedownResult": { - "$ref": "LearningGenaiRootRegexTakedownResult" }, - "requestResponseTakedownResult": { - "$ref": "LearningGenaiRootRequestResponseTakedownResult" + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", + "format": "byte", + "type": "string" }, - "similarityTakedownResult": { - "$ref": "LearningGenaiRootSimilarityTakedownResult" + "version": { + "description": "Specifies the format of the policy. 
Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "LearningGenaiRootThresholdType": { - "description": "The type of score that bundled with a threshold, and will not be attending the final score calculation. How each score type uses the threshold can be implementation details.", - "id": "LearningGenaiRootThresholdType", + "GoogleIamV1SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "id": "GoogleIamV1SetIamPolicyRequest", "properties": { - "scoreType": { - "enum": [ - "TYPE_UNKNOWN", - "TYPE_SAFE", - "TYPE_POLICY", - "TYPE_GENERATION" - ], - "enumDescriptions": [ - "Unknown scorer type.", - "Safety scorer.", - "Policy scorer.", - "Generation scorer." - ], - "type": "string" - }, - "threshold": { - "format": "double", - "type": "number" + "policy": { + "$ref": "GoogleIamV1Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. 
The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." } }, "type": "object" }, - "LearningGenaiRootTokensAndLogProbPerDecodingStep": { - "description": "Results of RandomSamplingParams::top_k_logprob_per_decoding_step.", - "id": "LearningGenaiRootTokensAndLogProbPerDecodingStep", + "GoogleIamV1TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "id": "GoogleIamV1TestIamPermissionsRequest", "properties": { - "chosenCandidates": { - "description": "Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.", - "items": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate" - }, - "type": "array" - }, - "topCandidates": { - "description": "Length = total number of decoding steps.", + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. 
For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates" + "type": "string" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate": { - "description": "A candidate at a decoding step.", - "id": "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate", - "properties": { - "logProbability": { - "description": "The candidate's log probability.", - "format": "float", - "type": "number" - }, - "token": { - "description": "The candidate’s token value.", - "type": "string" - } - }, - "type": "object" - }, - "LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates": { - "description": "Candidates with top log probabilities at each decoding step.", - "id": "LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates", + "GoogleIamV1TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "id": "GoogleIamV1TestIamPermissionsResponse", "properties": { - "candidates": { - "description": "Sorted by log probability in descending order.", + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { - "$ref": "LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate" + "type": "string" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootToxicityResult": { - "description": "A model can generate multiple signals and this captures all the generated signals for a single message.", - "id": "LearningGenaiRootToxicityResult", + "GoogleLongrunningListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "id": "GoogleLongrunningListOperationsResponse", "properties": { - "signals": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations 
that matches the specified filter in the request.", "items": { - "$ref": "LearningGenaiRootToxicitySignal" + "$ref": "GoogleLongrunningOperation" }, "type": "array" } }, "type": "object" }, - "LearningGenaiRootToxicitySignal": { - "description": "Proto to capture a signal generated by the toxicity model.", - "id": "LearningGenaiRootToxicitySignal", + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "GoogleLongrunningOperation", "properties": { - "allowed": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, - "label": { - "enum": [ - "LABEL_UNSPECIFIED", - "NOT_SENSITIVE", - "SENSITIVE", - "ACCIDENTS_DISASTERS", - "ADULT", - "COMPUTER_SECURITY", - "CONTROVERSIAL_SOCIAL_ISSUES", - "DEATH_TRAGEDY", - "DRUGS", - "IDENTITY_ETHNICITY", - "FINANCIAL_HARDSHIP", - "FIREARMS_WEAPONS", - "HEALTH", - "INSULT", - "LEGAL", - "MENTAL_HEALTH", - "POLITICS", - "RELIGION_BELIEFS", - "SAFETY", - "SELF_HARM", - "SPECIAL_NEEDS", - "TERRORISM", - "TOXIC", - "TROUBLED_RELATIONSHIP", - "VIOLENCE_ABUSE", - "VULGAR", - "WAR_CONFLICT" - ], - "enumDescriptions": [ - "Default label.", - "Input is not sensitive.", - "Input is sensitive.", - "Input is related to accidents or disasters.", - "Input contains adult content.", - "Input is related to computer security.", - "Input contains controversial social issues.", - "Input is related to death tragedy.", - "Input is related to drugs.", - "Input is related to identity or ethnicity.", - "Input is related to financial hardship.", - "Input is related to firearms or weapons.", - "Input contains health related information.", - "Input may be an insult.", - "Input is related to legal content.", - "Input contains mental health related information.", - "Input is related to politics.", - "Input is 
related to religions or beliefs.", - "Input is related to safety.", - "Input is related to self-harm.", - "Input is related to special needs.", - "Input is related to terrorism.", - "Input is toxic.", - "Input is related to troubled relationships.", - "Input contains content about violence or abuse.", - "Input is vulgar.", - "Input is related to war and conflict." - ], + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, - "score": { - "format": "float", - "type": "number" + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", + "type": "object" } }, "type": "object" }, - "LearningGenaiRootTranslationRequestInfo": { - "description": "Each TranslationRequestInfo corresponds to a request sent to the translation server.", - "id": "LearningGenaiRootTranslationRequestInfo", + "GoogleProtobufEmpty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", + "id": "GoogleProtobufEmpty", + "properties": {}, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "GoogleRpcStatus", "properties": { - "detectedLanguageCodes": { - "description": "The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. If the source language was passed, auto-detection of the language does not occur and this field is empty.", + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { - "type": "string" + "additionalProperties": { + "description": "Properties of the object. 
Contains field @type with type URL.", + "type": "any" + }, + "type": "object" }, "type": "array" }, - "totalContentSize": { - "description": "The sum of the size of all the contents in the request.", - "format": "int64", + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, - "LearningServingLlmAtlasOutputMetadata": { - "id": "LearningServingLlmAtlasOutputMetadata", + "GoogleTypeColor": { + "description": "Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? 
protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... 
var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ...", + "id": "GoogleTypeColor", "properties": { - "requestTopic": { - "type": "string" + "alpha": { + "description": "The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. 
If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0).", + "format": "float", + "type": "number" }, - "source": { - "enum": [ - "UNKNOWN", - "FACTUALITY", - "INFOBOT", - "LLM" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" + "blue": { + "description": "The amount of blue in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" + }, + "green": { + "description": "The amount of green in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" + }, + "red": { + "description": "The amount of red in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" } }, "type": "object" }, - "LearningServingLlmMessageMetadata": { - "description": "LINT.IfChange This metadata contains additional information required for debugging.", - "id": "LearningServingLlmMessageMetadata", + "GoogleTypeDate": { + "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp", + "id": "GoogleTypeDate", "properties": { - "atlasMetadata": { - "$ref": "LearningServingLlmAtlasOutputMetadata" - }, - "classifierSummary": { - "$ref": "LearningGenaiRootClassifierOutputSummary", - "description": "Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not." 
- }, - "codeyOutput": { - "$ref": "LearningGenaiRootCodeyOutput", - "description": "Contains metadata related to Codey Processors." - }, - "currentStreamTextLength": { - "format": "uint32", - "type": "integer" - }, - "deleted": { - "description": "Whether the corresponding message has been deleted.", - "type": "boolean" - }, - "filterMeta": { - "description": "Metadata for filters that triggered.", - "items": { - "$ref": "LearningGenaiRootFilterMetadata" - }, - "type": "array" - }, - "finalMessageScore": { - "$ref": "LearningGenaiRootScore", - "description": "This score is finally used for ranking the message. This will be same as the score present in `Message.score` field." - }, - "finishReason": { - "description": "NOT YET IMPLEMENTED.", - "enum": [ - "UNSPECIFIED", - "RETURN", - "STOP", - "MAX_TOKENS", - "FILTER", - "TOP_N_FILTERED" - ], - "enumDescriptions": [ - "", - "Return all the tokens back. This typically implies no filtering or stop sequence was triggered.", - "Finished due to provided stop sequence.", - "Model has emitted the maximum number of tokens as specified by max_decoding_steps.", - "Finished due to triggering some post-processing filter.", - "Filtered out due to Top_N < Response_Candidates.Size()" - ], - "type": "string" - }, - "groundingMetadata": { - "$ref": "LearningGenaiRootGroundingMetadata" - }, - "isCode": { - "description": "Applies to streaming response message only. Whether the message is a code.", - "type": "boolean" - }, - "isFallback": { - "description": "Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty.", - "type": "boolean" - }, - "langidResult": { - "$ref": "NlpSaftLangIdResult", - "description": "Result from nlp_saft DetectLanguage method. Currently the predicted language code and language probability is used." 
- }, - "language": { - "description": "Detected language.", - "type": "string" - }, - "lmPrefix": { - "description": "The LM prefix used to generate this response.", - "type": "string" - }, - "lmrootInternalRequestMetrics": { - "$ref": "LearningGenaiRootRequestMetrics", - "description": "FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames." - }, - "mmRecitationResult": { - "$ref": "LearningGenaiRecitationMMRecitationCheckResult", - "description": "Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked." - }, - "numRewinds": { - "description": "Number of Controlled Decoding rewind and repeats that have happened for this response.", - "format": "uint32", - "type": "integer" - }, - "originalText": { - "description": "The original text generated by LLM. This is the raw output for debugging purposes.", - "type": "string" - }, - "perStreamDecodedTokenCount": { - "description": "Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only.", - "format": "int32", - "type": "integer" - }, - "perStreamReturnedTokenCount": { - "description": "Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only.", + "day": { + "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.", "format": "int32", "type": "integer" }, - "raiOutputs": { - "description": "Results of running RAI on the query or this response candidate. One output per rai_config. 
It will be populated regardless of whether the threshold is exceeded or not.", - "items": { - "$ref": "LearningGenaiRootRAIOutput" - }, - "type": "array" - }, - "recitationResult": { - "$ref": "LearningGenaiRecitationRecitationResult", - "description": "Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome." - }, - "scores": { - "description": "All the different scores for a message are logged here.", - "items": { - "$ref": "LearningGenaiRootScore" - }, - "type": "array" - }, - "streamTerminated": { - "description": "Whether the response is terminated during streaming return. Only used for streaming requests.", - "type": "boolean" - }, - "totalDecodedTokenCount": { - "description": "Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate.", + "month": { + "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.", "format": "int32", "type": "integer" }, - "totalReturnedTokenCount": { - "description": "Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only.", + "year": { + "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.", "format": "int32", "type": "integer" - }, - "translatedUserPrompts": { - "description": "Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. 
It will be empty if non of the signals requested need translation.", - "items": { - "type": "string" - }, - "type": "array" - }, - "vertexRaiResult": { - "$ref": "CloudAiNlLlmProtoServiceRaiResult", - "description": "The metadata from Vertex SafetyCat processors" - } - }, - "type": "object" - }, - "NlpSaftLangIdLocalesResult": { - "id": "NlpSaftLangIdLocalesResult", - "properties": { - "predictions": { - "description": "List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be [\"pt-BR\", \"pt-PT\"], in that order. May be empty, indicating that the model did not predict any acceptable locales.", - "items": { - "$ref": "NlpSaftLangIdLocalesResultLocale" - }, - "type": "array" - } - }, - "type": "object" - }, - "NlpSaftLangIdLocalesResultLocale": { - "id": "NlpSaftLangIdLocalesResultLocale", - "properties": { - "languageCode": { - "description": "A BCP 47 language code that includes region information. For example, \"pt-BR\" or \"pt-PT\". This field will always be populated.", - "type": "string" } }, "type": "object" }, - "NlpSaftLangIdResult": { - "id": "NlpSaftLangIdResult", + "GoogleTypeExpr": { + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", + "id": "GoogleTypeExpr", "properties": { - "modelVersion": { - "description": "The version of the model used to create these annotations.", - "enum": [ - "VERSION_UNSPECIFIED", - "INDEXING_20181017", - "INDEXING_20191206", - "INDEXING_20200313", - "INDEXING_20210618", - "STANDARD_20220516" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], + "description": { + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, - "predictions": { - "description": "This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability.", - "items": { - "$ref": "NlpSaftLanguageSpan" - }, - "type": "array" - }, - "spanPredictions": { - "description": "This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. 
A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty.", - "items": { - "$ref": "NlpSaftLanguageSpanSequence" - }, - "type": "array" - } - }, - "type": "object" - }, - "NlpSaftLanguageSpan": { - "id": "NlpSaftLanguageSpan", - "properties": { - "end": { - "format": "int32", - "type": "integer" - }, - "languageCode": { - "description": "A BCP 47 language code for this span.", + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, - "locales": { - "$ref": "NlpSaftLangIdLocalesResult", - "description": "Optional field containing any information that was predicted about the specific locale(s) of the span." - }, - "probability": { - "description": "A probability associated with this prediction.", - "format": "float", - "type": "number" + "location": { + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" }, - "start": { - "description": "Start and end byte offsets, inclusive, within the given input string. A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. If both are unset then this LanguageSpan applies to the entire input.", - "format": "int32", - "type": "integer" + "title": { + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" } }, "type": "object" }, - "NlpSaftLanguageSpanSequence": { - "id": "NlpSaftLanguageSpanSequence", + "GoogleTypeInterval": { + "description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. 
When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", + "id": "GoogleTypeInterval", "properties": { - "languageSpans": { - "description": "A sequence of LanguageSpan objects, each assigning a language to a subspan of the input.", - "items": { - "$ref": "NlpSaftLanguageSpan" - }, - "type": "array" + "endTime": { + "description": "Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end.", + "format": "google-datetime", + "type": "string" }, - "probability": { - "description": "The probability of this sequence of LanguageSpans.", - "format": "float", - "type": "number" + "startTime": { + "description": "Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start.", + "format": "google-datetime", + "type": "string" } }, "type": "object" }, - "Proto2BridgeMessageSet": { - "description": "This is proto2's version of MessageSet.", - "id": "Proto2BridgeMessageSet", - "properties": {}, - "type": "object" - }, - "UtilStatusProto": { - "description": "Wire-format for a Status object", - "id": "UtilStatusProto", + "GoogleTypeMoney": { + "description": "Represents an amount of money with its currency type.", + "id": "GoogleTypeMoney", "properties": { - "canonicalCode": { - "description": "The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be.", - "format": "int32", - "type": "integer" + "currencyCode": { + "description": "The three-letter currency code defined in ISO 4217.", + "type": "string" }, - "code": { - "description": "Numeric code drawn from the space specified below. 
Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto", + "nanos": { + "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", "format": "int32", "type": "integer" }, - "message": { - "description": "Detail message", - "type": "string" - }, - "messageSet": { - "$ref": "Proto2BridgeMessageSet", - "description": "message_set associates an arbitrary proto message with the status." - }, - "space": { - "description": "The following are usually only present when code != 0 Space to which this status belongs", + "units": { + "description": "The whole units of the amount. For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", + "format": "int64", "type": "string" } }, diff --git a/src/apis/aiplatform/v1.ts b/src/apis/aiplatform/v1.ts index 4b93d8bcb0..e9a195d47c 100644 --- a/src/apis/aiplatform/v1.ts +++ b/src/apis/aiplatform/v1.ts @@ -126,15 +126,6 @@ export namespace aiplatform_v1 { } } - /** - * Video embedding response. - */ - export interface Schema$CloudAiLargeModelsVisionEmbedVideoResponse { - /** - * The embedding vector for the video. - */ - videoEmbeddings?: any[] | null; - } /** * Details for filtered input text. */ @@ -236,15 +227,6 @@ export namespace aiplatform_v1 { */ video?: Schema$CloudAiLargeModelsVisionVideo; } - /** - * Generate media content response - */ - export interface Schema$CloudAiLargeModelsVisionMediaGenerateContentResponse { - /** - * Response to the user's request. 
- */ - response?: Schema$CloudAiNlLlmProtoServiceGenerateMultiModalResponse; - } export interface Schema$CloudAiLargeModelsVisionNamedBoundingBox { classes?: string[] | null; entities?: string[] | null; @@ -264,41 +246,6 @@ export namespace aiplatform_v1 { */ scores?: number[] | null; } - /** - * Video reasoning response. - */ - export interface Schema$CloudAiLargeModelsVisionReasonVideoResponse { - /** - * Generated text responses. The generated responses for different segments within the same video. - */ - responses?: Schema$CloudAiLargeModelsVisionReasonVideoResponseTextResponse[]; - } - /** - * Contains text that is the response of the video captioning. - */ - export interface Schema$CloudAiLargeModelsVisionReasonVideoResponseTextResponse { - /** - * Partition of the caption's video in time. This field is intended for video captioning. To represent the start time and end time of the caption's video. - */ - relativeTemporalPartition?: Schema$CloudAiLargeModelsVisionRelativeTemporalPartition; - /** - * Text information - */ - text?: string | null; - } - /** - * For ease of use, assume that the start_offset is inclusive and the end_offset is exclusive. In mathematical terms, the partition would be written as [start_offset, end_offset). - */ - export interface Schema$CloudAiLargeModelsVisionRelativeTemporalPartition { - /** - * End time offset of the partition. - */ - endOffset?: string | null; - /** - * Start time offset of the partition. - */ - startOffset?: string | null; - } export interface Schema$CloudAiLargeModelsVisionSemanticFilterResponse { /** * Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates. @@ -322,3878 +269,3420 @@ export namespace aiplatform_v1 { */ video?: string | null; } - export interface Schema$CloudAiNlLlmProtoServiceCandidate { + /** + * Message that represents an arbitrary HTTP body. 
It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; \} service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); \} Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); \} Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. + */ + export interface Schema$GoogleApiHttpBody { /** - * Source attribution of the generated content. + * The HTTP Content-Type header value specifying the content type of the body. */ - citationMetadata?: Schema$CloudAiNlLlmProtoServiceCitationMetadata; + contentType?: string | null; /** - * Content of the candidate. + * The HTTP request/response body as raw binary. */ - content?: Schema$CloudAiNlLlmProtoServiceContent; + data?: string | null; /** - * A string that describes the filtering behavior in more detail. Only filled when reason is set. + * Application specific response metadata. Must be set in the first response for streaming APIs. */ - finishMessage?: string | null; + extensions?: Array<{[key: string]: any}> | null; + } + /** + * Parameters that configure the active learning pipeline. 
Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy. + */ + export interface Schema$GoogleCloudAiplatformV1ActiveLearningConfig { /** - * The reason why the model stopped generating tokens. + * Max number of human labeled DataItems. */ - finishReason?: string | null; + maxDataItemCount?: string | null; /** - * Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice. + * Max percent of total DataItems for human labeling. */ - groundingMetadata?: Schema$LearningGenaiRootGroundingMetadata; + maxDataItemPercentage?: number | null; /** - * Index of the candidate. + * Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy. */ - index?: number | null; + sampleConfig?: Schema$GoogleCloudAiplatformV1SampleConfig; /** - * Safety ratings of the generated content. + * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. */ - safetyRatings?: Schema$CloudAiNlLlmProtoServiceSafetyRating[]; + trainingConfig?: Schema$GoogleCloudAiplatformV1TrainingConfig; } /** - * Source attributions for content. + * Request message for MetadataService.AddContextArtifactsAndExecutions. */ - export interface Schema$CloudAiNlLlmProtoServiceCitation { - /** - * End index into the content. - */ - endIndex?: number | null; - /** - * License of the attribution. - */ - license?: string | null; + export interface Schema$GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest { /** - * Publication date of the attribution. + * The resource names of the Artifacts to attribute to the Context. 
Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/artifacts/{artifact\}` */ - publicationDate?: Schema$GoogleTypeDate; + artifacts?: string[] | null; /** - * Start index into the content. + * The resource names of the Executions to associate with the Context. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/executions/{execution\}` */ - startIndex?: number | null; + executions?: string[] | null; + } + /** + * Response message for MetadataService.AddContextArtifactsAndExecutions. + */ + export interface Schema$GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse {} + /** + * Request message for MetadataService.AddContextChildren. + */ + export interface Schema$GoogleCloudAiplatformV1AddContextChildrenRequest { /** - * Title of the attribution. + * The resource names of the child Contexts. */ - title?: string | null; + childContexts?: string[] | null; + } + /** + * Response message for MetadataService.AddContextChildren. + */ + export interface Schema$GoogleCloudAiplatformV1AddContextChildrenResponse {} + /** + * Request message for MetadataService.AddExecutionEvents. + */ + export interface Schema$GoogleCloudAiplatformV1AddExecutionEventsRequest { /** - * Url reference of the attribution. + * The Events to create and add. */ - uri?: string | null; + events?: Schema$GoogleCloudAiplatformV1Event[]; } /** - * A collection of source attributions for a piece of content. + * Response message for MetadataService.AddExecutionEvents. + */ + export interface Schema$GoogleCloudAiplatformV1AddExecutionEventsResponse {} + /** + * Request message for VizierService.AddTrialMeasurement. */ - export interface Schema$CloudAiNlLlmProtoServiceCitationMetadata { + export interface Schema$GoogleCloudAiplatformV1AddTrialMeasurementRequest { /** - * List of citations. + * Required. The measurement to be added to a Trial. 
*/ - citations?: Schema$CloudAiNlLlmProtoServiceCitation[]; + measurement?: Schema$GoogleCloudAiplatformV1Measurement; } /** - * The content of a single message from a participant. + * Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem. */ - export interface Schema$CloudAiNlLlmProtoServiceContent { + export interface Schema$GoogleCloudAiplatformV1Annotation { /** - * If true, the content is from a cached content. + * Output only. The source of the Annotation. */ - isCached?: boolean | null; + annotationSource?: Schema$GoogleCloudAiplatformV1UserActionReference; /** - * The parts of the message. + * Output only. Timestamp when this Annotation was created. */ - parts?: Schema$CloudAiNlLlmProtoServicePart[]; + createTime?: string | null; /** - * The role of the current conversation participant. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - role?: string | null; - } - /** - * A condense version of WorldFact (assistant/boq/lamda/factuality/proto/factuality.proto) to propagate the essential information about the fact used in factuality to the upstream caller. - */ - export interface Schema$CloudAiNlLlmProtoServiceFact { + etag?: string | null; /** - * Query that is used to retrieve this fact. + * Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. 
Following system labels exist for each Annotation: * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output only, its value is the payload_schema's title. */ - query?: string | null; + labels?: {[key: string]: string} | null; /** - * If present, the summary/snippet of the fact. + * Output only. Resource name of the Annotation. */ - summary?: string | null; + name?: string | null; /** - * If present, it refers to the title of this fact. + * Required. The schema of the payload can be found in payload_schema. */ - title?: string | null; + payload?: any | null; + /** + * Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata. + */ + payloadSchemaUri?: string | null; /** - * If present, this URL links to the webpage of the fact. + * Output only. Timestamp when this Annotation was last updated. */ - url?: string | null; + updateTime?: string | null; } /** - * Function call details. + * Identifies a concept with which DataItems may be annotated with. */ - export interface Schema$CloudAiNlLlmProtoServiceFunctionCall { + export interface Schema$GoogleCloudAiplatformV1AnnotationSpec { /** - * The function parameters and values in JSON format. + * Output only. Timestamp when this AnnotationSpec was created. */ - args?: {[key: string]: any} | null; + createTime?: string | null; + /** + * Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters. 
+ */ + displayName?: string | null; + /** + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + etag?: string | null; /** - * Required. The name of the function to call. + * Output only. Resource name of the AnnotationSpec. */ name?: string | null; + /** + * Output only. Timestamp when AnnotationSpec was last updated. + */ + updateTime?: string | null; } /** - * Function response details. + * Instance of a general artifact. */ - export interface Schema$CloudAiNlLlmProtoServiceFunctionResponse { + export interface Schema$GoogleCloudAiplatformV1Artifact { /** - * Required. The name of the function to call. + * Output only. Timestamp when this Artifact was created. */ - name?: string | null; + createTime?: string | null; /** - * Required. The function response in JSON object format. + * Description of the Artifact */ - response?: {[key: string]: any} | null; - } - export interface Schema$CloudAiNlLlmProtoServiceGenerateMultiModalResponse { + description?: string | null; /** - * Possible candidate responses to the conversation up until this point. + * User provided display name of the Artifact. May be up to 128 Unicode characters. */ - candidates?: Schema$CloudAiNlLlmProtoServiceCandidate[]; + displayName?: string | null; /** - * Debug information containing message metadata. Clients should not consume this field, and this is only populated for Flow Runner path. + * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - debugMetadata?: Schema$CloudAiNlLlmProtoServiceMessageMetadata; + etag?: string | null; /** - * External facts retrieved for factuality/grounding. + * The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. 
No more than 64 user labels can be associated with one Artifact (System labels are excluded). */ - facts?: Schema$CloudAiNlLlmProtoServiceFact[]; + labels?: {[key: string]: string} | null; /** - * Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. + * Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. */ - promptFeedback?: Schema$CloudAiNlLlmProtoServicePromptFeedback; + metadata?: {[key: string]: any} | null; /** - * Billable prediction metrics. + * Output only. The resource name of the Artifact. */ - reportingMetrics?: Schema$IntelligenceCloudAutomlXpsReportingMetrics; + name?: string | null; /** - * Usage metadata about the response(s). + * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - usageMetadata?: Schema$CloudAiNlLlmProtoServiceUsageMetadata; - } - export interface Schema$CloudAiNlLlmProtoServiceMessageMetadata { + schemaTitle?: string | null; /** - * Factuality-related debug metadata. + * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - factualityDebugMetadata?: Schema$LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata; + schemaVersion?: string | null; /** - * Filter metadata of the input messages. + * The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. 
This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions. */ - inputFilterInfo?: Schema$LearningServingLlmMessageMetadata; + state?: string | null; /** - * This score is generated by the router model to decide which model to use + * Output only. Timestamp when this Artifact was last updated. */ - modelRoutingDecision?: Schema$LearningGenaiRootRoutingDecision; + updateTime?: string | null; /** - * Filter metadata of the output messages. + * The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file. */ - outputFilterInfo?: Schema$LearningServingLlmMessageMetadata[]; + uri?: string | null; } /** - * A single part of a message. + * Metadata information for NotebookService.AssignNotebookRuntime. */ - export interface Schema$CloudAiNlLlmProtoServicePart { + export interface Schema$GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata { /** - * Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part. + * The operation generic information. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + /** + * A human-readable message that shows the intermediate progress details of NotebookRuntime. */ - documentMetadata?: Schema$CloudAiNlLlmProtoServicePartDocumentMetadata; + progressMessage?: string | null; + } + /** + * Request message for NotebookService.AssignNotebookRuntime. + */ + export interface Schema$GoogleCloudAiplatformV1AssignNotebookRuntimeRequest { /** - * URI-based data. + * Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment. */ - fileData?: Schema$CloudAiNlLlmProtoServicePartFileData; + notebookRuntime?: Schema$GoogleCloudAiplatformV1NotebookRuntime; /** - * Function call data. + * Optional. 
User specified ID for the notebook runtime. */ - functionCall?: Schema$CloudAiNlLlmProtoServiceFunctionCall; + notebookRuntimeId?: string | null; /** - * Function response data. + * Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one). */ - functionResponse?: Schema$CloudAiNlLlmProtoServiceFunctionResponse; + notebookRuntimeTemplate?: string | null; + } + /** + * Attribution that explains a particular prediction output. + */ + export interface Schema$GoogleCloudAiplatformV1Attribution { /** - * Inline bytes data + * Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. */ - inlineData?: Schema$CloudAiNlLlmProtoServicePartBlob; + approximationError?: number | null; /** - * Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields. + * Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank \> 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. */ - lmRootMetadata?: Schema$CloudAiNlLlmProtoServicePartLMRootMetadata; + baselineOutputValue?: number | null; /** - * Text input. + * Output only. Attributions of each explained feature. 
Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). */ - text?: string | null; + featureAttributions?: any | null; /** - * Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + * Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. */ - videoMetadata?: Schema$CloudAiNlLlmProtoServicePartVideoMetadata; - } - /** - * Represents arbitrary blob data input. - */ - export interface Schema$CloudAiNlLlmProtoServicePartBlob { + instanceOutputValue?: number | null; /** - * Inline data. + * Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. 
The predicted display name must have the same shape as the explained output, and can be located using output_index. */ - data?: string | null; + outputDisplayName?: string | null; /** - * The mime type corresponding to this input. + * Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. */ - mimeType?: string | null; + outputIndex?: number[] | null; /** - * Original file data where the blob comes from. + * Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs. */ - originalFileData?: Schema$CloudAiNlLlmProtoServicePartFileData; + outputName?: string | null; } /** - * Metadata describes the original input document content. + * A description of resources that to a large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. */ - export interface Schema$CloudAiNlLlmProtoServicePartDocumentMetadata { + export interface Schema$GoogleCloudAiplatformV1AutomaticResources { /** - * The original document blob. + * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. 
If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. */ - originalDocumentBlob?: Schema$CloudAiNlLlmProtoServicePartBlob; + maxReplicaCount?: number | null; /** - * The (1-indexed) page number of the image in the original document. The first page carries the original document content and mime type. + * Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */ - pageNumber?: number | null; + minReplicaCount?: number | null; } /** - * Represents file data. + * The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count. */ - export interface Schema$CloudAiNlLlmProtoServicePartFileData { + export interface Schema$GoogleCloudAiplatformV1AutoscalingMetricSpec { /** - * Inline data. + * Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ - fileUri?: string | null; + metricName?: string | null; /** - * The mime type corresponding to this input. + * The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. */ - mimeType?: string | null; + target?: number | null; } /** - * Metadata provides extra info for building the LM Root request. + * The storage details for Avro input content. 
*/ - export interface Schema$CloudAiNlLlmProtoServicePartLMRootMetadata { + export interface Schema$GoogleCloudAiplatformV1AvroSource { /** - * Chunk id that will be used when mapping the part to the LM Root's chunk. + * Required. Google Cloud Storage location. */ - chunkId?: string | null; + gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; } /** - * Metadata describes the input video content. + * Request message for PipelineService.BatchCancelPipelineJobs. */ - export interface Schema$CloudAiNlLlmProtoServicePartVideoMetadata { - /** - * The end offset of the video. - */ - endOffset?: string | null; + export interface Schema$GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest { /** - * The start offset of the video. + * Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` */ - startOffset?: string | null; + names?: string[] | null; } /** - * Content filter results for a prompt sent in the request. + * Details of operations that perform batch create Features. */ - export interface Schema$CloudAiNlLlmProtoServicePromptFeedback { + export interface Schema$GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata { /** - * Blocked reason. + * Operation metadata for Feature. */ - blockReason?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Request message for FeaturestoreService.BatchCreateFeatures. + */ + export interface Schema$GoogleCloudAiplatformV1BatchCreateFeaturesRequest { /** - * A readable block reason message. + * Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message. 
*/ - blockReasonMessage?: string | null; + requests?: Schema$GoogleCloudAiplatformV1CreateFeatureRequest[]; + } + /** + * Response message for FeaturestoreService.BatchCreateFeatures. + */ + export interface Schema$GoogleCloudAiplatformV1BatchCreateFeaturesResponse { /** - * Safety ratings. + * The Features created. */ - safetyRatings?: Schema$CloudAiNlLlmProtoServiceSafetyRating[]; + features?: Schema$GoogleCloudAiplatformV1Feature[]; } /** - * The RAI results for a given text. Next ID: 12 + * Request message for TensorboardService.BatchCreateTensorboardRuns. */ - export interface Schema$CloudAiNlLlmProtoServiceRaiResult { + export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest { /** - * Recitation result from Aida recitation checker. + * Required. The request message specifying the TensorboardRuns to create. A maximum of 1000 TensorboardRuns can be created in a batch. */ - aidaRecitationResult?: Schema$LanguageLabsAidaTrustRecitationProtoRecitationResult; + requests?: Schema$GoogleCloudAiplatformV1CreateTensorboardRunRequest[]; + } + /** + * Response message for TensorboardService.BatchCreateTensorboardRuns. + */ + export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse { /** - * Use `triggered_blocklist`. + * The created TensorboardRuns. */ - blocked?: boolean | null; + tensorboardRuns?: Schema$GoogleCloudAiplatformV1TensorboardRun[]; + } + /** + * Request message for TensorboardService.BatchCreateTensorboardTimeSeries. + */ + export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { /** - * The error codes indicate which RAI filters block the response. + * Required. The request message specifying the TensorboardTimeSeries to create. A maximum of 1000 TensorboardTimeSeries can be created in a batch. 
*/ - errorCodes?: number[] | null; + requests?: Schema$GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest[]; + } + /** + * Response message for TensorboardService.BatchCreateTensorboardTimeSeries. + */ + export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { /** - * Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`. + * The created TensorboardTimeSeries. */ - filtered?: boolean | null; + tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1TensorboardTimeSeries[]; + } + /** + * A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration. + */ + export interface Schema$GoogleCloudAiplatformV1BatchDedicatedResources { /** - * Language filter result from SAFT LangId. + * Required. Immutable. The specification of a single machine. */ - languageFilterResult?: Schema$LearningGenaiRootLanguageFilterResult; + machineSpec?: Schema$GoogleCloudAiplatformV1MachineSpec; /** - * Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. + * Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10. */ - mmRecitationResult?: Schema$LearningGenaiRecitationMMRecitationCheckResult; + maxReplicaCount?: number | null; /** - * The RAI signals for the text. + * Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count */ - raiSignals?: Schema$CloudAiNlLlmProtoServiceRaiSignal[]; + startingReplicaCount?: number | null; + } + /** + * Request message for PipelineService.BatchDeletePipelineJobs. 
+ */ + export interface Schema$GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest { /** - * Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server. + * Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` */ - translationRequestInfos?: Schema$LearningGenaiRootTranslationRequestInfo[]; + names?: string[] | null; + } + /** + * Request message for ModelService.BatchImportEvaluatedAnnotations + */ + export interface Schema$GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest { /** - * Whether the text triggered the blocklist. + * Required. Evaluated annotations resource to be imported. */ - triggeredBlocklist?: boolean | null; + evaluatedAnnotations?: Schema$GoogleCloudAiplatformV1EvaluatedAnnotation[]; + } + /** + * Response message for ModelService.BatchImportEvaluatedAnnotations + */ + export interface Schema$GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse { /** - * Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result. + * Output only. Number of EvaluatedAnnotations imported. */ - triggeredRecitation?: boolean | null; + importedEvaluatedAnnotationsCount?: number | null; + } + /** + * Request message for ModelService.BatchImportModelEvaluationSlices + */ + export interface Schema$GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest { /** - * Whether the text triggered the safety filter. Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold. + * Required. Model evaluation slice resource to be imported. 
*/ - triggeredSafetyFilter?: boolean | null; + modelEvaluationSlices?: Schema$GoogleCloudAiplatformV1ModelEvaluationSlice[]; } /** - * An RAI signal for a single category. + * Response message for ModelService.BatchImportModelEvaluationSlices */ - export interface Schema$CloudAiNlLlmProtoServiceRaiSignal { + export interface Schema$GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse { /** - * The confidence level for the RAI category. + * Output only. List of imported ModelEvaluationSlice.name. */ - confidence?: string | null; + importedModelEvaluationSlices?: string[] | null; + } + /** + * Runtime operation information for MigrationService.BatchMigrateResources. + */ + export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata { /** - * Whether the category is flagged as being present. Currently, this is set to true if score \>= 0.5. + * The common part of the operation metadata. */ - flagged?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * The influential terms that could potentially block the response. + * Partial results that reflect the latest migration operation progress. */ - influentialTerms?: Schema$CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm[]; + partialResults?: Schema$GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult[]; + } + /** + * Represents a partial result in batch migration operation for one MigrateResourceRequest. + */ + export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult { /** - * The RAI category. + * Migrated dataset resource name. */ - raiCategory?: string | null; + dataset?: string | null; /** - * The score for the category, in the range [0.0, 1.0]. + * The error result of the migration request in case of failure. */ - score?: number | null; - } - /** - * The influential term that could potentially block the response. 
- */ - export interface Schema$CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm { + error?: Schema$GoogleRpcStatus; /** - * The beginning offset of the influential term. + * Migrated model resource name. */ - beginOffset?: number | null; + model?: string | null; /** - * The confidence score of the influential term. + * It's the same as the value in MigrateResourceRequest.migrate_resource_requests. */ - confidence?: number | null; + request?: Schema$GoogleCloudAiplatformV1MigrateResourceRequest; + } + /** + * Request message for MigrationService.BatchMigrateResources. + */ + export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesRequest { /** - * The source of the influential term, prompt or response. + * Required. The request messages specifying the resources to migrate. They must be in the same location as the destination. Up to 50 resources can be migrated in one batch. */ - source?: string | null; + migrateResourceRequests?: Schema$GoogleCloudAiplatformV1MigrateResourceRequest[]; + } + /** + * Response message for MigrationService.BatchMigrateResources. + */ + export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesResponse { /** - * The influential term. + * Successfully migrated resources. */ - term?: string | null; + migrateResourceResponses?: Schema$GoogleCloudAiplatformV1MigrateResourceResponse[]; } /** - * Safety rating corresponding to the generated content. + * A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances. */ - export interface Schema$CloudAiNlLlmProtoServiceSafetyRating { + export interface Schema$GoogleCloudAiplatformV1BatchPredictionJob { /** - * Indicates whether the content was filtered out because of this rating. + * Output only. Statistics on completed and failed prediction instances. 
*/ - blocked?: boolean | null; + completionStats?: Schema$GoogleCloudAiplatformV1CompletionStats; /** - * Harm category. + * Output only. Time when the BatchPredictionJob was created. */ - category?: string | null; + createTime?: string | null; /** - * The influential terms that could potentially block the response. + * The config of resources used by the Model during the batch prediction. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. */ - influentialTerms?: Schema$CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm[]; + dedicatedResources?: Schema$GoogleCloudAiplatformV1BatchDedicatedResources; /** - * Harm probability levels in the content. + * For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. */ - probability?: string | null; + disableContainerLogging?: boolean | null; /** - * Harm probability score. + * Required. The user-defined name of this BatchPredictionJob. */ - probabilityScore?: number | null; + displayName?: string | null; /** - * Harm severity levels in the content. + * Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key. */ - severity?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Harm severity score. + * Output only. Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. 
*/ - severityScore?: number | null; - } - /** - * The influential term that could potentially block the response. - */ - export interface Schema$CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm { + endTime?: string | null; /** - * The beginning offset of the influential term. + * Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. */ - beginOffset?: number | null; + error?: Schema$GoogleRpcStatus; /** - * The confidence score of the influential term. + * Explanation configuration for this BatchPredictionJob. Can be specified only if generate_explanation is set to `true`. This value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of the explanation_spec object is not populated, the corresponding field of the Model.explanation_spec object is inherited. */ - confidence?: number | null; + explanationSpec?: Schema$GoogleCloudAiplatformV1ExplanationSpec; /** - * The source of the influential term, prompt or response. + * Generate explanation with the batch prediction results. When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated. */ - source?: string | null; + generateExplanation?: boolean | null; /** - * The influential term. + * Required. Input configuration of the instances on which predictions are performed. The schema of any single instance may be specified via the Model's PredictSchemata's instance_schema_uri. 
*/ - term?: string | null; - } - /** - * Usage metadata about response(s). - */ - export interface Schema$CloudAiNlLlmProtoServiceUsageMetadata { + inputConfig?: Schema$GoogleCloudAiplatformV1BatchPredictionJobInputConfig; /** - * Number of tokens in the response(s). + * Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. */ - candidatesTokenCount?: number | null; + instanceConfig?: Schema$GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig; /** - * Number of tokens in the request. + * The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - promptTokenCount?: number | null; - totalTokenCount?: number | null; - } - /** - * Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. 
google.api.HttpBody http_body = 2; \} service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); \} Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); \} Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. - */ - export interface Schema$GoogleApiHttpBody { + labels?: {[key: string]: string} | null; /** - * The HTTP Content-Type header value specifying the content type of the body. + * Immutable. Parameters configuring the batch behavior. Currently only applicable when dedicated_resources are used (in other cases Vertex AI does the tuning itself). */ - contentType?: string | null; + manualBatchTuningParameters?: Schema$GoogleCloudAiplatformV1ManualBatchTuningParameters; /** - * The HTTP request/response body as raw binary. + * The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher\}/models/{model\}` or `projects/{project\}/locations/{location\}/publishers/{publisher\}/models/{model\}` */ - data?: string | null; + model?: string | null; /** - * Application specific response metadata. 
Must be set in the first response for streaming APIs. + * The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri. */ - extensions?: Array<{[key: string]: any}> | null; - } - /** - * Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy. - */ - export interface Schema$GoogleCloudAiplatformV1ActiveLearningConfig { + modelParameters?: any | null; /** - * Max number of human labeled DataItems. + * Output only. The version ID of the Model that produces the predictions via this job. */ - maxDataItemCount?: string | null; + modelVersionId?: string | null; /** - * Max percent of total DataItems for human labeling. + * Output only. Resource name of the BatchPredictionJob. */ - maxDataItemPercentage?: number | null; + name?: string | null; /** - * Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy. + * Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of Model's PredictSchemata's instance_schema_uri and prediction_schema_uri. */ - sampleConfig?: Schema$GoogleCloudAiplatformV1SampleConfig; + outputConfig?: Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputConfig; /** - * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. + * Output only. Information further describing the output of this job. */ - trainingConfig?: Schema$GoogleCloudAiplatformV1TrainingConfig; - } - /** - * Request message for MetadataService.AddContextArtifactsAndExecutions. 
- */ - export interface Schema$GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest { + outputInfo?: Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputInfo; /** - * The resource names of the Artifacts to attribute to the Context. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/artifacts/{artifact\}` + * Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details. */ - artifacts?: string[] | null; + partialFailures?: Schema$GoogleRpcStatus[]; /** - * The resource names of the Executions to associate with the Context. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/executions/{execution\}` + * Output only. Information about resources that had been consumed by this job. Provided in real time at best effort basis, as well as a final value once the job completes. Note: This field currently may be not populated for batch predictions that use AutoML Models. */ - executions?: string[] | null; - } - /** - * Response message for MetadataService.AddContextArtifactsAndExecutions. - */ - export interface Schema$GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse {} - /** - * Request message for MetadataService.AddContextChildren. - */ - export interface Schema$GoogleCloudAiplatformV1AddContextChildrenRequest { + resourcesConsumed?: Schema$GoogleCloudAiplatformV1ResourcesConsumed; /** - * The resource names of the child Contexts. + * The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. 
*/ - childContexts?: string[] | null; - } - /** - * Response message for MetadataService.AddContextChildren. - */ - export interface Schema$GoogleCloudAiplatformV1AddContextChildrenResponse {} - /** - * Request message for MetadataService.AddExecutionEvents. - */ - export interface Schema$GoogleCloudAiplatformV1AddExecutionEventsRequest { + serviceAccount?: string | null; /** - * The Events to create and add. + * Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state. */ - events?: Schema$GoogleCloudAiplatformV1Event[]; - } - /** - * Response message for MetadataService.AddExecutionEvents. - */ - export interface Schema$GoogleCloudAiplatformV1AddExecutionEventsResponse {} - /** - * Request message for VizierService.AddTrialMeasurement. - */ - export interface Schema$GoogleCloudAiplatformV1AddTrialMeasurementRequest { - /** - * Required. The measurement to be added to a Trial. - */ - measurement?: Schema$GoogleCloudAiplatformV1Measurement; - } - /** - * Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem. - */ - export interface Schema$GoogleCloudAiplatformV1Annotation { - /** - * Output only. The source of the Annotation. - */ - annotationSource?: Schema$GoogleCloudAiplatformV1UserActionReference; - /** - * Output only. Timestamp when this Annotation was created. - */ - createTime?: string | null; - /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). 
See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Annotation: * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output only, its value is the payload_schema's title. - */ - labels?: {[key: string]: string} | null; - /** - * Output only. Resource name of the Annotation. - */ - name?: string | null; + startTime?: string | null; /** - * Required. The schema of the payload can be found in payload_schema. + * Output only. The detailed state of the job. */ - payload?: any | null; + state?: string | null; /** - * Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata. + * Contains model information necessary to perform batch prediction without requiring uploading to model registry. Exactly one of model and unmanaged_container_model must be set. */ - payloadSchemaUri?: string | null; + unmanagedContainerModel?: Schema$GoogleCloudAiplatformV1UnmanagedContainerModel; /** - * Output only. Timestamp when this Annotation was last updated. + * Output only. Time when the BatchPredictionJob was most recently updated. */ updateTime?: string | null; } /** - * Identifies a concept with which DataItems may be annotated with. + * Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them. 
*/ - export interface Schema$GoogleCloudAiplatformV1AnnotationSpec { - /** - * Output only. Timestamp when this AnnotationSpec was created. - */ - createTime?: string | null; - /** - * Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters. - */ - displayName?: string | null; + export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobInputConfig { /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The BigQuery location of the input table. The schema of the table should be in the format described by the given context OpenAPI Schema, if one is provided. The table may contain additional columns that are not described by the schema, and they will be ignored. */ - etag?: string | null; + bigquerySource?: Schema$GoogleCloudAiplatformV1BigQuerySource; /** - * Output only. Resource name of the AnnotationSpec. + * The Cloud Storage location for the input instances. */ - name?: string | null; + gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; /** - * Output only. Timestamp when AnnotationSpec was last updated. + * Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. */ - updateTime?: string | null; + instancesFormat?: string | null; } /** - * Instance of a general artifact. + * Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. */ - export interface Schema$GoogleCloudAiplatformV1Artifact { - /** - * Output only. Timestamp when this Artifact was created. - */ - createTime?: string | null; - /** - * Description of the Artifact - */ - description?: string | null; + export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig { /** - * User provided display name of the Artifact. May be up to 128 Unicode characters. 
+ * Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. */ - displayName?: string | null; + excludedFields?: string[] | null; /** - * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. */ - etag?: string | null; + includedFields?: string[] | null; /** - * The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded). + * The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. 
* For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the file. */ - labels?: {[key: string]: string} | null; + instanceType?: string | null; /** - * Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + * The name of the field that is considered as a key. The values identified by the key field is not included in the transformed instances that is sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. */ - metadata?: {[key: string]: any} | null; + keyField?: string | null; + } + /** + * Configures the output of BatchPredictionJob. 
See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them. + */ + export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputConfig { /** - * Output only. The resource name of the Artifact. + * The BigQuery project or dataset location where the output is to be written to. If project is provided, a new dataset is created with name `prediction__` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both instance and prediction schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has google.rpc.Status represented as a STRUCT, and containing only `code` and `message`. */ - name?: string | null; + bigqueryDestination?: Schema$GoogleCloudAiplatformV1BigQueryDestination; /** - * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * The Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. 
Inside of it files `predictions_0001.`, `predictions_0002.`, ..., `predictions_N.` are created where `` depends on chosen predictions_format, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both instance and prediction schemata defined then each such file contains predictions as per the predictions_format. If prediction for any instance failed (partially or completely), then an additional `errors_0001.`, `errors_0002.`,..., `errors_N.` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has google.rpc.Status containing only `code` and `message` fields. */ - schemaTitle?: string | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; /** - * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats. */ - schemaVersion?: string | null; + predictionsFormat?: string | null; + } + /** + * Further describes this job's output. Supplements output_config. + */ + export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputInfo { /** - * The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions. + * Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written. */ - state?: string | null; + bigqueryOutputDataset?: string | null; /** - * Output only. 
Timestamp when this Artifact was last updated. + * Output only. The name of the BigQuery table created, in `predictions_` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example. */ - updateTime?: string | null; + bigqueryOutputTable?: string | null; /** - * The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file. + * Output only. The full path of the Cloud Storage directory created, into which the prediction output is written. */ - uri?: string | null; + gcsOutputDirectory?: string | null; } /** - * Metadata information for NotebookService.AssignNotebookRuntime. + * Details of operations that batch reads Feature values. */ - export interface Schema$GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata { /** - * The operation generic information. + * Operation metadata for Featurestore batch read Features values. */ genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - /** - * A human-readable message that shows the intermediate progress details of NotebookRuntime. - */ - progressMessage?: string | null; } /** - * Request message for NotebookService.AssignNotebookRuntime. + * Request message for FeaturestoreService.BatchReadFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1AssignNotebookRuntimeRequest { - /** - * Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment. - */ - notebookRuntime?: Schema$GoogleCloudAiplatformV1NotebookRuntime; + export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { /** - * Optional. User specified ID for the notebook runtime. + * Similar to csv_read_instances, but from BigQuery source. 
*/ - notebookRuntimeId?: string | null; + bigqueryReadInstances?: Schema$GoogleCloudAiplatformV1BigQuerySource; /** - * Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one). + * Each read instance consists of exactly one read timestamp and one or more entity IDs identifying entities of the corresponding EntityTypes whose Features are requested. Each output instance contains Feature values of requested entities concatenated together as of the read time. An example read instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z`. An example output instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each read instance must be millisecond-aligned. `csv_read_instances` are read instances stored in a plain-text CSV file. The header should be: [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in any order. Values in the timestamp column must use the RFC 3339 format, e.g. `2012-07-30T10:43:17.123Z`. */ - notebookRuntimeTemplate?: string | null; - } - /** - * Attribution that explains a particular prediction output. - */ - export interface Schema$GoogleCloudAiplatformV1Attribution { + csvReadInstances?: Schema$GoogleCloudAiplatformV1CsvSource; /** - * Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. + * Required. Specifies output location and format. 
*/ - approximationError?: number | null; + destination?: Schema$GoogleCloudAiplatformV1FeatureValueDestination; /** - * Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank \> 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. + * Required. Specifies EntityType grouping Features to read values of and settings. */ - baselineOutputValue?: number | null; + entityTypeSpecs?: Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec[]; /** - * Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). 
+ * When not empty, the specified fields in the *_read_instances source will be joined as-is in the output, in addition to those fields from the Featurestore Entity. For BigQuery source, the type of the pass-through values will be automatically inferred. For CSV source, the pass-through values will be passed as opaque bytes. */ - featureAttributions?: any | null; + passThroughFields?: Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField[]; /** - * Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. + * Optional. Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. */ - instanceOutputValue?: number | null; + startTime?: string | null; + } + /** + * Selects Features of an EntityType to read values of and specifies read settings. + */ + export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec { /** - * Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index. + * Required. ID of the EntityType to select Features. The EntityType id is the entity_type_id specified during EntityType creation. */ - outputDisplayName?: string | null; + entityTypeId?: string | null; /** - * Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. 
If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. + * Required. Selectors choosing which Feature values to read from the EntityType. */ - outputIndex?: number[] | null; + featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; /** - * Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs. + * Per-Feature settings for the batch read. */ - outputName?: string | null; + settings?: Schema$GoogleCloudAiplatformV1DestinationFeatureSetting[]; } /** - * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. + * Describe pass-through fields in read_instance source. */ - export interface Schema$GoogleCloudAiplatformV1AutomaticResources { - /** - * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. - */ - maxReplicaCount?: number | null; + export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField { /** - * Immutable. The minimum number of replicas this DeployedModel will be always deployed on. 
If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + * Required. The name of the field in the CSV header or the name of the column in BigQuery table. The naming restriction is the same as Feature.name. */ - minReplicaCount?: number | null; + fieldName?: string | null; } /** - * The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count. + * Response message for FeaturestoreService.BatchReadFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1AutoscalingMetricSpec { - /** - * Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` - */ - metricName?: string | null; + export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesResponse {} + /** + * Response message for TensorboardService.BatchReadTensorboardTimeSeriesData. + */ + export interface Schema$GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { /** - * The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + * The returned time series data. */ - target?: number | null; + timeSeriesData?: Schema$GoogleCloudAiplatformV1TimeSeriesData[]; } /** - * The storage details for Avro input content. + * The BigQuery location for the output content. */ - export interface Schema$GoogleCloudAiplatformV1AvroSource { + export interface Schema$GoogleCloudAiplatformV1BigQueryDestination { /** - * Required. Google Cloud Storage location. + * Required. 
BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. */ - gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; + outputUri?: string | null; } /** - * Request message for PipelineService.BatchCancelPipelineJobs. + * The BigQuery location for the input content. */ - export interface Schema$GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest { + export interface Schema$GoogleCloudAiplatformV1BigQuerySource { /** - * Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` + * Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. */ - names?: string[] | null; + inputUri?: string | null; } /** - * Details of operations that perform batch create Features. + * Content blob. It's preferred to send as text directly rather than raw bytes. */ - export interface Schema$GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1Blob { /** - * Operation metadata for Feature. + * Required. Raw bytes. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + data?: string | null; + /** + * Required. The IANA standard MIME type of the source data. + */ + mimeType?: string | null; } /** - * Request message for FeaturestoreService.BatchCreateFeatures. + * Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. 
Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 */ - export interface Schema$GoogleCloudAiplatformV1BatchCreateFeaturesRequest { + export interface Schema$GoogleCloudAiplatformV1BlurBaselineConfig { /** - * Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message. + * The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. */ - requests?: Schema$GoogleCloudAiplatformV1CreateFeatureRequest[]; + maxBlurSigma?: number | null; } /** - * Response message for FeaturestoreService.BatchCreateFeatures. + * A list of boolean values. */ - export interface Schema$GoogleCloudAiplatformV1BatchCreateFeaturesResponse { + export interface Schema$GoogleCloudAiplatformV1BoolArray { /** - * The Features created. + * A list of bool values. */ - features?: Schema$GoogleCloudAiplatformV1Feature[]; + values?: boolean[] | null; } /** - * Request message for TensorboardService.BatchCreateTensorboardRuns. + * Request message for JobService.CancelBatchPredictionJob. */ - export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest { - /** - * Required. The request message specifying the TensorboardRuns to create. A maximum of 1000 TensorboardRuns can be created in a batch. - */ - requests?: Schema$GoogleCloudAiplatformV1CreateTensorboardRunRequest[]; - } + export interface Schema$GoogleCloudAiplatformV1CancelBatchPredictionJobRequest {} /** - * Response message for TensorboardService.BatchCreateTensorboardRuns. + * Request message for JobService.CancelCustomJob. 
*/ - export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse { - /** - * The created TensorboardRuns. - */ - tensorboardRuns?: Schema$GoogleCloudAiplatformV1TensorboardRun[]; - } + export interface Schema$GoogleCloudAiplatformV1CancelCustomJobRequest {} /** - * Request message for TensorboardService.BatchCreateTensorboardTimeSeries. + * Request message for JobService.CancelDataLabelingJob. */ - export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { - /** - * Required. The request message specifying the TensorboardTimeSeries to create. A maximum of 1000 TensorboardTimeSeries can be created in a batch. - */ - requests?: Schema$GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest[]; - } + export interface Schema$GoogleCloudAiplatformV1CancelDataLabelingJobRequest {} /** - * Response message for TensorboardService.BatchCreateTensorboardTimeSeries. + * Request message for JobService.CancelHyperparameterTuningJob. */ - export interface Schema$GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { - /** - * The created TensorboardTimeSeries. - */ - tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1TensorboardTimeSeries[]; - } + export interface Schema$GoogleCloudAiplatformV1CancelHyperparameterTuningJobRequest {} /** - * A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration. + * Request message for JobService.CancelNasJob. */ - export interface Schema$GoogleCloudAiplatformV1BatchDedicatedResources { + export interface Schema$GoogleCloudAiplatformV1CancelNasJobRequest {} + /** + * Request message for PipelineService.CancelPipelineJob. + */ + export interface Schema$GoogleCloudAiplatformV1CancelPipelineJobRequest {} + /** + * Request message for PipelineService.CancelTrainingPipeline. 
+ */ + export interface Schema$GoogleCloudAiplatformV1CancelTrainingPipelineRequest {} + /** + * Request message for GenAiTuningService.CancelTuningJob. + */ + export interface Schema$GoogleCloudAiplatformV1CancelTuningJobRequest {} + /** + * A response candidate generated from the model. + */ + export interface Schema$GoogleCloudAiplatformV1Candidate { /** - * Required. Immutable. The specification of a single machine. + * Output only. Source attribution of the generated content. */ - machineSpec?: Schema$GoogleCloudAiplatformV1MachineSpec; + citationMetadata?: Schema$GoogleCloudAiplatformV1CitationMetadata; /** - * Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10. + * Output only. Content parts of the candidate. */ - maxReplicaCount?: number | null; + content?: Schema$GoogleCloudAiplatformV1Content; /** - * Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count + * Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. */ - startingReplicaCount?: number | null; - } - /** - * Request message for PipelineService.BatchDeletePipelineJobs. - */ - export interface Schema$GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest { + finishMessage?: string | null; /** - * Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` + * Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */ - names?: string[] | null; + finishReason?: string | null; + /** + * Output only. Metadata specifies sources used to ground generated content. + */ + groundingMetadata?: Schema$GoogleCloudAiplatformV1GroundingMetadata; + /** + * Output only. 
Index of the candidate. + */ + index?: number | null; + /** + * Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. + */ + safetyRatings?: Schema$GoogleCloudAiplatformV1SafetyRating[]; } /** - * Request message for ModelService.BatchImportEvaluatedAnnotations + * This message will be placed in the metadata field of a google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request. */ - export interface Schema$GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest { + export interface Schema$GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateMetatdata { /** - * Required. Evaluated annotations resource to be imported. + * Operation metadata for suggesting Trials. */ - evaluatedAnnotations?: Schema$GoogleCloudAiplatformV1EvaluatedAnnotation[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + /** + * The name of the Study that the Trial belongs to. + */ + study?: string | null; + /** + * The Trial name. + */ + trial?: string | null; } /** - * Response message for ModelService.BatchImportEvaluatedAnnotations + * Request message for VizierService.CheckTrialEarlyStoppingState. */ - export interface Schema$GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse { + export interface Schema$GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateRequest {} + /** + * Response message for VizierService.CheckTrialEarlyStoppingState. + */ + export interface Schema$GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateResponse { /** - * Output only. Number of EvaluatedAnnotations imported. + * True if the Trial should stop. */ - importedEvaluatedAnnotationsCount?: number | null; + shouldStop?: boolean | null; } /** - * Request message for ModelService.BatchImportModelEvaluationSlices + * Source attributions for content. 
*/ - export interface Schema$GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest { + export interface Schema$GoogleCloudAiplatformV1Citation { /** - * Required. Model evaluation slice resource to be imported. + * Output only. End index into the content. */ - modelEvaluationSlices?: Schema$GoogleCloudAiplatformV1ModelEvaluationSlice[]; + endIndex?: number | null; + /** + * Output only. License of the attribution. + */ + license?: string | null; + /** + * Output only. Publication date of the attribution. + */ + publicationDate?: Schema$GoogleTypeDate; + /** + * Output only. Start index into the content. + */ + startIndex?: number | null; + /** + * Output only. Title of the attribution. + */ + title?: string | null; + /** + * Output only. Url reference of the attribution. + */ + uri?: string | null; } /** - * Response message for ModelService.BatchImportModelEvaluationSlices + * A collection of source attributions for a piece of content. */ - export interface Schema$GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse { + export interface Schema$GoogleCloudAiplatformV1CitationMetadata { /** - * Output only. List of imported ModelEvaluationSlice.name. + * Output only. List of citations. */ - importedModelEvaluationSlices?: string[] | null; + citations?: Schema$GoogleCloudAiplatformV1Citation[]; } /** - * Runtime operation information for MigrationService.BatchMigrateResources. + * Request message for VizierService.CompleteTrial. */ - export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1CompleteTrialRequest { /** - * The common part of the operation metadata. + * Optional. 
If provided, it will be used as the completed Trial's final_measurement; Otherwise, the service will auto-select a previously reported measurement as the final-measurement */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + finalMeasurement?: Schema$GoogleCloudAiplatformV1Measurement; /** - * Partial results that reflect the latest migration operation progress. + * Optional. A human readable reason why the trial was infeasible. This should only be provided if `trial_infeasible` is true. */ - partialResults?: Schema$GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult[]; + infeasibleReason?: string | null; + /** + * Optional. True if the Trial cannot be run with the given Parameter, and final_measurement will be ignored. + */ + trialInfeasible?: boolean | null; } /** - * Represents a partial result in batch migration operation for one MigrateResourceRequest. + * Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch. */ - export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult { + export interface Schema$GoogleCloudAiplatformV1CompletionStats { /** - * Migrated dataset resource name. + * Output only. The number of entities for which any error was encountered. */ - dataset?: string | null; + failedCount?: string | null; /** - * The error result of the migration request in case of failure. + * Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected). */ - error?: Schema$GoogleRpcStatus; + incompleteCount?: string | null; /** - * Migrated model resource name. + * Output only. 
The number of entities that had been processed successfully. */ - model?: string | null; + successfulCount?: string | null; /** - * It's the same as the value in MigrateResourceRequest.migrate_resource_requests. + * Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction. */ - request?: Schema$GoogleCloudAiplatformV1MigrateResourceRequest; + successfulForecastPointCount?: string | null; } /** - * Request message for MigrationService.BatchMigrateResources. + * Request message for ComputeTokens RPC call. */ - export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesRequest { + export interface Schema$GoogleCloudAiplatformV1ComputeTokensRequest { /** - * Required. The request messages specifying the resources to migrate. They must be in the same location as the destination. Up to 50 resources can be migrated in one batch. + * Required. The instances that are the input to token computing API call. Schema is identical to the prediction schema of the text model, even for the non-text models, like chat models, or Codey models. */ - migrateResourceRequests?: Schema$GoogleCloudAiplatformV1MigrateResourceRequest[]; + instances?: any[] | null; } /** - * Response message for MigrationService.BatchMigrateResources. + * Response message for ComputeTokens RPC call. */ - export interface Schema$GoogleCloudAiplatformV1BatchMigrateResourcesResponse { + export interface Schema$GoogleCloudAiplatformV1ComputeTokensResponse { /** - * Successfully migrated resources. + * Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances. 
*/ - migrateResourceResponses?: Schema$GoogleCloudAiplatformV1MigrateResourceResponse[]; + tokensInfo?: Schema$GoogleCloudAiplatformV1TokensInfo[]; } /** - * A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances. + * The Container Registry location for the container image. */ - export interface Schema$GoogleCloudAiplatformV1BatchPredictionJob { + export interface Schema$GoogleCloudAiplatformV1ContainerRegistryDestination { /** - * Output only. Statistics on completed and failed prediction instances. + * Required. Container Registry URI of a container image. Only Google Container Registry and Artifact Registry are supported now. Accepted forms: * Google Container Registry path. For example: `gcr.io/projectId/imageName:tag`. * Artifact Registry path. For example: `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. If a tag is not specified, "latest" will be used as the default tag. */ - completionStats?: Schema$GoogleCloudAiplatformV1CompletionStats; + outputUri?: string | null; + } + /** + * The spec of a Container. + */ + export interface Schema$GoogleCloudAiplatformV1ContainerSpec { /** - * Output only. Time when the BatchPredictionJob was created. + * The arguments to be passed when starting the container. */ - createTime?: string | null; + args?: string[] | null; /** - * The config of resources used by the Model during the batch prediction. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. + * The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. 
*/ - dedicatedResources?: Schema$GoogleCloudAiplatformV1BatchDedicatedResources; + command?: string[] | null; /** - * For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + * Environment variables to be passed to the container. Maximum limit is 100. */ - disableContainerLogging?: boolean | null; + env?: Schema$GoogleCloudAiplatformV1EnvVar[]; /** - * Required. The user-defined name of this BatchPredictionJob. + * Required. The URI of a container image in the Container Registry that is to be run on each worker replica. */ - displayName?: string | null; + imageUri?: string | null; + } + /** + * The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. + */ + export interface Schema$GoogleCloudAiplatformV1Content { /** - * Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key. + * Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; - /** - * Output only. Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. - */ - endTime?: string | null; + parts?: Schema$GoogleCloudAiplatformV1Part[]; /** - * Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. + * Optional. The producer of the content. Must be either 'user' or 'model'. 
Useful to set for multi-turn conversations, otherwise can be left blank or unset. */ - error?: Schema$GoogleRpcStatus; + role?: string | null; + } + /** + * Instance of a general context. + */ + export interface Schema$GoogleCloudAiplatformV1Context { /** - * Explanation configuration for this BatchPredictionJob. Can be specified only if generate_explanation is set to `true`. This value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of the explanation_spec object is not populated, the corresponding field of the Model.explanation_spec object is inherited. + * Output only. Timestamp when this Context was created. */ - explanationSpec?: Schema$GoogleCloudAiplatformV1ExplanationSpec; + createTime?: string | null; /** - * Generate explanation with the batch prediction results. When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated. + * Description of the Context */ - generateExplanation?: boolean | null; + description?: string | null; /** - * Required. Input configuration of the instances on which predictions are performed. The schema of any single instance may be specified via the Model's PredictSchemata's instance_schema_uri. + * User provided display name of the Context. May be up to 128 Unicode characters. 
*/ - inputConfig?: Schema$GoogleCloudAiplatformV1BatchPredictionJobInputConfig; + displayName?: string | null; /** - * Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. + * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - instanceConfig?: Schema$GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig; + etag?: string | null; /** - * The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). */ labels?: {[key: string]: string} | null; /** - * Immutable. Parameters configuring the batch behavior. Currently only applicable when dedicated_resources are used (in other cases Vertex AI does the tuning itself). - */ - manualBatchTuningParameters?: Schema$GoogleCloudAiplatformV1ManualBatchTuningParameters; - /** - * The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. 
Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher\}/models/{model\}` or `projects/{project\}/locations/{location\}/publishers/{publisher\}/models/{model\}` - */ - model?: string | null; - /** - * The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri. - */ - modelParameters?: any | null; - /** - * Output only. The version ID of the Model that produces the predictions via this job. + * Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. */ - modelVersionId?: string | null; + metadata?: {[key: string]: any} | null; /** - * Output only. Resource name of the BatchPredictionJob. + * Immutable. The resource name of the Context. */ name?: string | null; /** - * Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of Model's PredictSchemata's instance_schema_uri and prediction_schema_uri. + * Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. */ - outputConfig?: Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputConfig; + parentContexts?: string[] | null; /** - * Output only. Information further describing the output of this job. + * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. 
*/ - outputInfo?: Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputInfo; + schemaTitle?: string | null; /** - * Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details. + * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - partialFailures?: Schema$GoogleRpcStatus[]; + schemaVersion?: string | null; /** - * Output only. Information about resources that had been consumed by this job. Provided in real time at best effort basis, as well as a final value once the job completes. Note: This field currently may be not populated for batch predictions that use AutoML Models. + * Output only. Timestamp when this Context was last updated. */ - resourcesConsumed?: Schema$GoogleCloudAiplatformV1ResourcesConsumed; + updateTime?: string | null; + } + /** + * Details of ModelService.CopyModel operation. + */ + export interface Schema$GoogleCloudAiplatformV1CopyModelOperationMetadata { /** - * The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + * The common part of the operation metadata. */ - serviceAccount?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Request message for ModelService.CopyModel. + */ + export interface Schema$GoogleCloudAiplatformV1CopyModelRequest { /** - * Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state. 
+ * Customer-managed encryption key options. If this is set, then the Model copy will be encrypted with the provided encryption key. */ - startTime?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Output only. The detailed state of the job. + * Optional. Copy source_model into a new Model with this ID. The ID will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. */ - state?: string | null; + modelId?: string | null; /** - * Contains model information necessary to perform batch prediction without requiring uploading to model registry. Exactly one of model and unmanaged_container_model must be set. + * Optional. Specify this field to copy source_model into this existing Model as a new version. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - unmanagedContainerModel?: Schema$GoogleCloudAiplatformV1UnmanagedContainerModel; + parentModel?: string | null; /** - * Output only. Time when the BatchPredictionJob was most recently updated. + * Required. The resource name of the Model to copy. That Model must be in the same Project. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - updateTime?: string | null; + sourceModel?: string | null; } /** - * Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them. + * Response message of ModelService.CopyModel operation. */ - export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobInputConfig { - /** - * The BigQuery location of the input table. The schema of the table should be in the format described by the given context OpenAPI Schema, if one is provided. The table may contain additional columns that are not described by the schema, and they will be ignored. 
- */ - bigquerySource?: Schema$GoogleCloudAiplatformV1BigQuerySource; + export interface Schema$GoogleCloudAiplatformV1CopyModelResponse { /** - * The Cloud Storage location for the input instances. + * The name of the copied Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; + model?: string | null; /** - * Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. + * Output only. The version ID of the model that is copied. */ - instancesFormat?: string | null; + modelVersionId?: string | null; } /** - * Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. + * Request message for PredictionService.CountTokens. */ - export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig { - /** - * Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. - */ - excludedFields?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1CountTokensRequest { /** - * Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. + * Required. Input content. */ - includedFields?: string[] | null; + contents?: Schema$GoogleCloudAiplatformV1Content[]; /** - * The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. 
Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the file. + * Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. */ - instanceType?: string | null; + instances?: any[] | null; /** - * The name of the field that is considered as a key. The values identified by the key field is not included in the transformed instances that is sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. 
Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + * Required. The name of the publisher model requested to serve the prediction. Format: `projects/{project\}/locations/{location\}/publishers/x/models/x` */ - keyField?: string | null; + model?: string | null; } /** - * Configures the output of BatchPredictionJob. See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them. + * Response message for PredictionService.CountTokens. */ - export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputConfig { - /** - * The BigQuery project or dataset location where the output is to be written to. If project is provided, a new dataset is created with name `prediction__` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both instance and prediction schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has google.rpc.Status represented as a STRUCT, and containing only `code` and `message`. 
- */ - bigqueryDestination?: Schema$GoogleCloudAiplatformV1BigQueryDestination; + export interface Schema$GoogleCloudAiplatformV1CountTokensResponse { /** - * The Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.`, `predictions_0002.`, ..., `predictions_N.` are created where `` depends on chosen predictions_format, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both instance and prediction schemata defined then each such file contains predictions as per the predictions_format. If prediction for any instance failed (partially or completely), then an additional `errors_0001.`, `errors_0002.`,..., `errors_N.` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has google.rpc.Status containing only `code` and `message` fields. + * The total number of billable characters counted across all instances from the request. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; + totalBillableCharacters?: number | null; /** - * Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats. + * The total number of tokens counted across all instances from the request. */ - predictionsFormat?: string | null; + totalTokens?: number | null; } /** - * Further describes this job's output. Supplements output_config. + * Runtime operation information for DatasetService.CreateDataset. */ - export interface Schema$GoogleCloudAiplatformV1BatchPredictionJobOutputInfo { - /** - * Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written. 
- */ - bigqueryOutputDataset?: string | null; - /** - * Output only. The name of the BigQuery table created, in `predictions_` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example. - */ - bigqueryOutputTable?: string | null; + export interface Schema$GoogleCloudAiplatformV1CreateDatasetOperationMetadata { /** - * Output only. The full path of the Cloud Storage directory created, into which the prediction output is written. + * The operation generic information. */ - gcsOutputDirectory?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Details of operations that batch reads Feature values. + * Runtime operation information for DatasetService.CreateDatasetVersion. */ - export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1CreateDatasetVersionOperationMetadata { /** - * Operation metadata for Featurestore batch read Features values. + * The common part of the operation metadata. */ genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Request message for FeaturestoreService.BatchReadFeatureValues. + * Runtime operation information for CreateDeploymentResourcePool method. */ - export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { - /** - * Similar to csv_read_instances, but from BigQuery source. - */ - bigqueryReadInstances?: Schema$GoogleCloudAiplatformV1BigQuerySource; - /** - * Each read instance consists of exactly one read timestamp and one or more entity IDs identifying entities of the corresponding EntityTypes whose Features are requested. Each output instance contains Feature values of requested entities concatenated together as of the read time. An example read instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z`. 
An example output instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each read instance must be millisecond-aligned. `csv_read_instances` are read instances stored in a plain-text CSV file. The header should be: [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in any order. Values in the timestamp column must use the RFC 3339 format, e.g. `2012-07-30T10:43:17.123Z`. - */ - csvReadInstances?: Schema$GoogleCloudAiplatformV1CsvSource; - /** - * Required. Specifies output location and format. - */ - destination?: Schema$GoogleCloudAiplatformV1FeatureValueDestination; - /** - * Required. Specifies EntityType grouping Features to read values of and settings. - */ - entityTypeSpecs?: Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec[]; - /** - * When not empty, the specified fields in the *_read_instances source will be joined as-is in the output, in addition to those fields from the Featurestore Entity. For BigQuery source, the type of the pass-through values will be automatically inferred. For CSV source, the pass-through values will be passed as opaque bytes. - */ - passThroughFields?: Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField[]; + export interface Schema$GoogleCloudAiplatformV1CreateDeploymentResourcePoolOperationMetadata { /** - * Optional. Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. + * The operation generic information. */ - startTime?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Selects Features of an EntityType to read values of and specifies read settings. + * Request message for CreateDeploymentResourcePool method. 
*/ - export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec { - /** - * Required. ID of the EntityType to select Features. The EntityType id is the entity_type_id specified during EntityType creation. - */ - entityTypeId?: string | null; + export interface Schema$GoogleCloudAiplatformV1CreateDeploymentResourcePoolRequest { /** - * Required. Selectors choosing which Feature values to read from the EntityType. + * Required. The DeploymentResourcePool to create. */ - featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; + deploymentResourcePool?: Schema$GoogleCloudAiplatformV1DeploymentResourcePool; /** - * Per-Feature settings for the batch read. + * Required. The ID to use for the DeploymentResourcePool, which will become the final component of the DeploymentResourcePool's resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61\}[a-z0-9])?$/`. */ - settings?: Schema$GoogleCloudAiplatformV1DestinationFeatureSetting[]; + deploymentResourcePoolId?: string | null; } /** - * Describe pass-through fields in read_instance source. + * Runtime operation information for EndpointService.CreateEndpoint. */ - export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField { + export interface Schema$GoogleCloudAiplatformV1CreateEndpointOperationMetadata { /** - * Required. The name of the field in the CSV header or the name of the column in BigQuery table. The naming restriction is the same as Feature.name. + * The operation generic information. */ - fieldName?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Response message for FeaturestoreService.BatchReadFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1BatchReadFeatureValuesResponse {} - /** - * Response message for TensorboardService.BatchReadTensorboardTimeSeriesData. + * Details of operations that perform create EntityType. 
*/ - export interface Schema$GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { + export interface Schema$GoogleCloudAiplatformV1CreateEntityTypeOperationMetadata { /** - * The returned time series data. + * Operation metadata for EntityType. */ - timeSeriesData?: Schema$GoogleCloudAiplatformV1TimeSeriesData[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * The BigQuery location for the output content. + * Details of operations that perform create FeatureGroup. */ - export interface Schema$GoogleCloudAiplatformV1BigQueryDestination { + export interface Schema$GoogleCloudAiplatformV1CreateFeatureGroupOperationMetadata { /** - * Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + * Operation metadata for FeatureGroup. */ - outputUri?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * The BigQuery location for the input content. + * Details of operations that perform create FeatureOnlineStore. */ - export interface Schema$GoogleCloudAiplatformV1BigQuerySource { + export interface Schema$GoogleCloudAiplatformV1CreateFeatureOnlineStoreOperationMetadata { /** - * Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. + * Operation metadata for FeatureOnlineStore. */ - inputUri?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Content blob. It's preferred to send as text directly rather than raw bytes. + * Details of operations that perform create Feature. 
*/ - export interface Schema$GoogleCloudAiplatformV1Blob { + export interface Schema$GoogleCloudAiplatformV1CreateFeatureOperationMetadata { /** - * Required. Raw bytes. + * Operation metadata for Feature. */ - data?: string | null; - /** - * Required. The IANA standard MIME type of the source data. - */ - mimeType?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + * Request message for FeaturestoreService.CreateFeature. Request message for FeatureRegistryService.CreateFeature. */ - export interface Schema$GoogleCloudAiplatformV1BlurBaselineConfig { + export interface Schema$GoogleCloudAiplatformV1CreateFeatureRequest { /** - * The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + * Required. The Feature to create. */ - maxBlurSigma?: number | null; + feature?: Schema$GoogleCloudAiplatformV1Feature; + /** + * Required. The ID to use for the Feature, which will become the final component of the Feature's resource name. This value may be up to 128 characters, and valid characters are `[a-z0-9_]`. The first character cannot be a number. The value must be unique within an EntityType/FeatureGroup. + */ + featureId?: string | null; + /** + * Required. The resource name of the EntityType or FeatureGroup to create a Feature. 
Format for entity_type as parent: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` Format for feature_group as parent: `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}` + */ + parent?: string | null; } /** - * A list of boolean values. + * Details of operations that perform create Featurestore. */ - export interface Schema$GoogleCloudAiplatformV1BoolArray { + export interface Schema$GoogleCloudAiplatformV1CreateFeaturestoreOperationMetadata { /** - * A list of bool values. + * Operation metadata for Featurestore. */ - values?: boolean[] | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Request message for JobService.CancelBatchPredictionJob. - */ - export interface Schema$GoogleCloudAiplatformV1CancelBatchPredictionJobRequest {} - /** - * Request message for JobService.CancelCustomJob. - */ - export interface Schema$GoogleCloudAiplatformV1CancelCustomJobRequest {} - /** - * Request message for JobService.CancelDataLabelingJob. - */ - export interface Schema$GoogleCloudAiplatformV1CancelDataLabelingJobRequest {} - /** - * Request message for JobService.CancelHyperparameterTuningJob. - */ - export interface Schema$GoogleCloudAiplatformV1CancelHyperparameterTuningJobRequest {} - /** - * Request message for JobService.CancelNasJob. - */ - export interface Schema$GoogleCloudAiplatformV1CancelNasJobRequest {} - /** - * Request message for PipelineService.CancelPipelineJob. - */ - export interface Schema$GoogleCloudAiplatformV1CancelPipelineJobRequest {} - /** - * Request message for PipelineService.CancelTrainingPipeline. - */ - export interface Schema$GoogleCloudAiplatformV1CancelTrainingPipelineRequest {} - /** - * Request message for GenAiTuningService.CancelTuningJob. + * Details of operations that perform create FeatureView. 
*/ - export interface Schema$GoogleCloudAiplatformV1CancelTuningJobRequest {} + export interface Schema$GoogleCloudAiplatformV1CreateFeatureViewOperationMetadata { + /** + * Operation metadata for FeatureView Create. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } /** - * A response candidate generated from the model. + * Runtime operation information for IndexEndpointService.CreateIndexEndpoint. */ - export interface Schema$GoogleCloudAiplatformV1Candidate { + export interface Schema$GoogleCloudAiplatformV1CreateIndexEndpointOperationMetadata { /** - * Output only. Source attribution of the generated content. + * The operation generic information. */ - citationMetadata?: Schema$GoogleCloudAiplatformV1CitationMetadata; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Runtime operation information for IndexService.CreateIndex. + */ + export interface Schema$GoogleCloudAiplatformV1CreateIndexOperationMetadata { /** - * Output only. Content parts of the candidate. + * The operation generic information. */ - content?: Schema$GoogleCloudAiplatformV1Content; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. + * The operation metadata with regard to Matching Engine Index operation. */ - finishMessage?: string | null; + nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; + } + /** + * Details of operations that perform MetadataService.CreateMetadataStore. + */ + export interface Schema$GoogleCloudAiplatformV1CreateMetadataStoreOperationMetadata { /** - * Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. + * Operation metadata for creating a MetadataStore. 
*/ - finishReason?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Metadata information for NotebookService.CreateNotebookRuntimeTemplate. + */ + export interface Schema$GoogleCloudAiplatformV1CreateNotebookRuntimeTemplateOperationMetadata { /** - * Output only. Metadata specifies sources used to ground generated content. + * The operation generic information. */ - groundingMetadata?: Schema$GoogleCloudAiplatformV1GroundingMetadata; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Details of operations that perform create PersistentResource. + */ + export interface Schema$GoogleCloudAiplatformV1CreatePersistentResourceOperationMetadata { /** - * Output only. Index of the candidate. + * Operation metadata for PersistentResource. */ - index?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. + * Progress Message for Create LRO */ - safetyRatings?: Schema$GoogleCloudAiplatformV1SafetyRating[]; + progressMessage?: string | null; } /** - * This message will be placed in the metadata field of a google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request. + * Request message for PipelineService.CreatePipelineJob. */ - export interface Schema$GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateMetatdata { + export interface Schema$GoogleCloudAiplatformV1CreatePipelineJobRequest { /** - * Operation metadata for suggesting Trials. + * Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project\}/locations/{location\}` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + parent?: string | null; /** - * The name of the Study that the Trial belongs to. + * Required. The PipelineJob to create. 
*/ - study?: string | null; + pipelineJob?: Schema$GoogleCloudAiplatformV1PipelineJob; /** - * The Trial name. + * The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are `/a-z-/`. */ - trial?: string | null; + pipelineJobId?: string | null; } /** - * Request message for VizierService.CheckTrialEarlyStoppingState. - */ - export interface Schema$GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateRequest {} - /** - * Response message for VizierService.CheckTrialEarlyStoppingState. + * Details of operations that perform create FeatureGroup. */ - export interface Schema$GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateResponse { + export interface Schema$GoogleCloudAiplatformV1CreateRegistryFeatureOperationMetadata { /** - * True if the Trial should stop. + * Operation metadata for Feature. */ - shouldStop?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Source attributions for content. + * Runtime operation information for SpecialistPoolService.CreateSpecialistPool. */ - export interface Schema$GoogleCloudAiplatformV1Citation { - /** - * Output only. End index into the content. - */ - endIndex?: number | null; - /** - * Output only. License of the attribution. - */ - license?: string | null; - /** - * Output only. Publication date of the attribution. - */ - publicationDate?: Schema$GoogleTypeDate; - /** - * Output only. Start index into the content. - */ - startIndex?: number | null; - /** - * Output only. Title of the attribution. - */ - title?: string | null; + export interface Schema$GoogleCloudAiplatformV1CreateSpecialistPoolOperationMetadata { /** - * Output only. Url reference of the attribution. + * The operation generic information. 
*/ - uri?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * A collection of source attributions for a piece of content. + * Details of operations that perform create Tensorboard. */ - export interface Schema$GoogleCloudAiplatformV1CitationMetadata { + export interface Schema$GoogleCloudAiplatformV1CreateTensorboardOperationMetadata { /** - * Output only. List of citations. + * Operation metadata for Tensorboard. */ - citations?: Schema$GoogleCloudAiplatformV1Citation[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Request message for VizierService.CompleteTrial. + * Request message for TensorboardService.CreateTensorboardRun. */ - export interface Schema$GoogleCloudAiplatformV1CompleteTrialRequest { + export interface Schema$GoogleCloudAiplatformV1CreateTensorboardRunRequest { /** - * Optional. If provided, it will be used as the completed Trial's final_measurement; Otherwise, the service will auto-select a previously reported measurement as the final-measurement + * Required. The resource name of the TensorboardExperiment to create the TensorboardRun in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` */ - finalMeasurement?: Schema$GoogleCloudAiplatformV1Measurement; + parent?: string | null; /** - * Optional. A human readable reason why the trial was infeasible. This should only be provided if `trial_infeasible` is true. + * Required. The TensorboardRun to create. */ - infeasibleReason?: string | null; + tensorboardRun?: Schema$GoogleCloudAiplatformV1TensorboardRun; /** - * Optional. True if the Trial cannot be run with the given Parameter, and final_measurement will be ignored. + * Required. The ID to use for the Tensorboard run, which becomes the final component of the Tensorboard run's resource name. This value should be 1-128 characters, and valid characters are `/a-z-/`. 
*/ - trialInfeasible?: boolean | null; + tensorboardRunId?: string | null; } /** - * Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch. + * Request message for TensorboardService.CreateTensorboardTimeSeries. */ - export interface Schema$GoogleCloudAiplatformV1CompletionStats { - /** - * Output only. The number of entities for which any error was encountered. - */ - failedCount?: string | null; + export interface Schema$GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { /** - * Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected). + * Required. The resource name of the TensorboardRun to create the TensorboardTimeSeries in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` */ - incompleteCount?: string | null; + parent?: string | null; /** - * Output only. The number of entities that had been processed successfully. + * Required. The TensorboardTimeSeries to create. */ - successfulCount?: string | null; + tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1TensorboardTimeSeries; /** - * Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction. + * Optional. The user specified unique ID to use for the TensorboardTimeSeries, which becomes the final component of the TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, 127\}" */ - successfulForecastPointCount?: string | null; + tensorboardTimeSeriesId?: string | null; } /** - * Request message for ComputeTokens RPC call. 
+ * The storage details for CSV output content. */ - export interface Schema$GoogleCloudAiplatformV1ComputeTokensRequest { + export interface Schema$GoogleCloudAiplatformV1CsvDestination { /** - * Required. The instances that are the input to token computing API call. Schema is identical to the prediction schema of the text model, even for the non-text models, like chat models, or Codey models. + * Required. Google Cloud Storage location. */ - instances?: any[] | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; } /** - * Response message for ComputeTokens RPC call. + * The storage details for CSV input content. */ - export interface Schema$GoogleCloudAiplatformV1ComputeTokensResponse { + export interface Schema$GoogleCloudAiplatformV1CsvSource { /** - * Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances. + * Required. Google Cloud Storage location. */ - tokensInfo?: Schema$GoogleCloudAiplatformV1TokensInfo[]; + gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; } /** - * The Container Registry location for the container image. + * Represents a job that runs custom workloads such as a Docker container or a Python package. A CustomJob can have multiple worker pools and each worker pool can have its own machine and input spec. A CustomJob will be cleaned up once the job enters terminal state (failed or succeeded). */ - export interface Schema$GoogleCloudAiplatformV1ContainerRegistryDestination { + export interface Schema$GoogleCloudAiplatformV1CustomJob { /** - * Required. Container Registry URI of a container image. Only Google Container Registry and Artifact Registry are supported now. Accepted forms: * Google Container Registry path. For example: `gcr.io/projectId/imageName:tag`. * Artifact Registry path. For example: `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. 
If a tag is not specified, "latest" will be used as the default tag. + * Output only. Time when the CustomJob was created. */ - outputUri?: string | null; - } - /** - * The spec of a Container. - */ - export interface Schema$GoogleCloudAiplatformV1ContainerSpec { + createTime?: string | null; /** - * The arguments to be passed when starting the container. + * Required. The display name of the CustomJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - args?: string[] | null; + displayName?: string | null; /** - * The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. + * Customer-managed encryption key options for a CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. */ - command?: string[] | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Environment variables to be passed to the container. Maximum limit is 100. + * Output only. Time when the CustomJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ - env?: Schema$GoogleCloudAiplatformV1EnvVar[]; + endTime?: string | null; /** - * Required. The URI of a container image in the Container Registry that is to be run on each worker replica. + * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */ - imageUri?: string | null; - } - /** - * The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. - */ - export interface Schema$GoogleCloudAiplatformV1Content { + error?: Schema$GoogleRpcStatus; /** - * Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + * Required. Job spec. 
*/ - parts?: Schema$GoogleCloudAiplatformV1Part[]; + jobSpec?: Schema$GoogleCloudAiplatformV1CustomJobSpec; /** - * Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + * The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - role?: string | null; - } - /** - * Instance of a general context. - */ - export interface Schema$GoogleCloudAiplatformV1Context { + labels?: {[key: string]: string} | null; /** - * Output only. Timestamp when this Context was created. + * Output only. Resource name of a CustomJob. */ - createTime?: string | null; + name?: string | null; /** - * Description of the Context + * Output only. Time when the CustomJob for the first time entered the `JOB_STATE_RUNNING` state. */ - description?: string | null; + startTime?: string | null; /** - * User provided display name of the Context. May be up to 128 Unicode characters. + * Output only. The detailed state of the job. */ - displayName?: string | null; + state?: string | null; /** - * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Output only. Time when the CustomJob was most recently updated. */ - etag?: string | null; + updateTime?: string | null; /** - * The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + * Output only. 
URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if job_spec.enable_web_access is `true`. The keys are names of each node in the training job; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. */ - labels?: {[key: string]: string} | null; + webAccessUris?: {[key: string]: string} | null; + } + /** + * Represents the spec of a CustomJob. + */ + export interface Schema$GoogleCloudAiplatformV1CustomJobSpec { /** - * Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + * The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` */ - metadata?: {[key: string]: any} | null; + baseOutputDirectory?: Schema$GoogleCloudAiplatformV1GcsDestination; /** - * Immutable. The resource name of the Context. + * Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. 
If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). */ - name?: string | null; + enableDashboardAccess?: boolean | null; /** - * Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. + * Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). */ - parentContexts?: string[] | null; + enableWebAccess?: boolean | null; /** - * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Optional. The Experiment associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}` */ - schemaTitle?: string | null; + experiment?: string | null; /** - * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Optional. The Experiment Run associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}-{experiment-run-name\}` */ - schemaVersion?: string | null; + experimentRun?: string | null; /** - * Output only. Timestamp when this Context was last updated. + * Optional. The name of the Model resources for which to generate a mapping to artifact URIs. 
Applicable only to some of the Google-provided custom jobs. Format: `projects/{project\}/locations/{location\}/models/{model\}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. */ - updateTime?: string | null; - } - /** - * Details of ModelService.CopyModel operation. - */ - export interface Schema$GoogleCloudAiplatformV1CopyModelOperationMetadata { + models?: string[] | null; /** - * The common part of the operation metadata. + * Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for ModelService.CopyModel. - */ - export interface Schema$GoogleCloudAiplatformV1CopyModelRequest { + network?: string | null; /** - * Customer-managed encryption key options. If this is set, then the Model copy will be encrypted with the provided encryption key. + * Optional. 
The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + persistentResourceId?: string | null; /** - * Optional. Copy source_model into a new Model with this ID. The ID will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + * The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations */ - modelId?: string | null; + protectedArtifactLocationId?: string | null; /** - * Optional. Specify this field to copy source_model into this existing Model as a new version. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. */ - parentModel?: string | null; + reservedIpRanges?: string[] | null; /** - * Required. The resource name of the Model to copy. That Model must be in the same Project. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Scheduling options for a CustomJob. */ - sourceModel?: string | null; - } - /** - * Response message of ModelService.CopyModel operation. 
- */ - export interface Schema$GoogleCloudAiplatformV1CopyModelResponse { + scheduling?: Schema$GoogleCloudAiplatformV1Scheduling; /** - * The name of the copied Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. */ - model?: string | null; + serviceAccount?: string | null; /** - * Output only. The version ID of the model that is copied. + * Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` */ - modelVersionId?: string | null; + tensorboard?: string | null; + /** + * Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + */ + workerPoolSpecs?: Schema$GoogleCloudAiplatformV1WorkerPoolSpec[]; } /** - * Request message for PredictionService.CountTokens. + * A piece of data in a Dataset. Could be an image, a video, a document or plain text. */ - export interface Schema$GoogleCloudAiplatformV1CountTokensRequest { + export interface Schema$GoogleCloudAiplatformV1DataItem { /** - * Required. Input content. + * Output only. Timestamp when this DataItem was created. */ - contents?: Schema$GoogleCloudAiplatformV1Content[]; + createTime?: string | null; /** - * Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
*/ - instances?: any[] | null; + etag?: string | null; /** - * Required. The name of the publisher model requested to serve the prediction. Format: `projects/{project\}/locations/{location\}/publishers/x/models/x` + * Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one DataItem(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - model?: string | null; - } - /** - * Response message for PredictionService.CountTokens. - */ - export interface Schema$GoogleCloudAiplatformV1CountTokensResponse { + labels?: {[key: string]: string} | null; /** - * The total number of billable characters counted across all instances from the request. + * Output only. The resource name of the DataItem. */ - totalBillableCharacters?: number | null; + name?: string | null; /** - * The total number of tokens counted across all instances from the request. + * Required. The data that the DataItem represents (for example, an image or a text snippet). The schema of the payload is stored in the parent Dataset's metadata schema's dataItemSchemaUri field. */ - totalTokens?: number | null; - } - /** - * Runtime operation information for DatasetService.CreateDataset. - */ - export interface Schema$GoogleCloudAiplatformV1CreateDatasetOperationMetadata { + payload?: any | null; /** - * The operation generic information. + * Output only. Timestamp when this DataItem was last updated. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + updateTime?: string | null; } /** - * Runtime operation information for DatasetService.CreateDatasetVersion. 
+ * A container for a single DataItem and Annotations on it. */ - export interface Schema$GoogleCloudAiplatformV1CreateDatasetVersionOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1DataItemView { /** - * The common part of the operation metadata. + * The Annotations on the DataItem. If too many Annotations should be returned for the DataItem, this field will be truncated per annotations_limit in request. If it was, then the has_truncated_annotations will be set to true. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Runtime operation information for CreateDeploymentResourcePool method. - */ - export interface Schema$GoogleCloudAiplatformV1CreateDeploymentResourcePoolOperationMetadata { + annotations?: Schema$GoogleCloudAiplatformV1Annotation[]; /** - * The operation generic information. + * The DataItem. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + dataItem?: Schema$GoogleCloudAiplatformV1DataItem; + /** + * True if and only if the Annotations field has been truncated. It happens if more Annotations for this DataItem met the request's annotation_filter than are allowed to be returned by annotations_limit. Note that if Annotations field is not being returned due to field mask, then this field will not be set to true no matter how many Annotations are there. + */ + hasTruncatedAnnotations?: boolean | null; } /** - * Request message for CreateDeploymentResourcePool method. + * DataLabelingJob is used to trigger a human labeling job on unlabeled data from the following Dataset: */ - export interface Schema$GoogleCloudAiplatformV1CreateDeploymentResourcePoolRequest { + export interface Schema$GoogleCloudAiplatformV1DataLabelingJob { /** - * Required. The DeploymentResourcePool to create. + * Parameters that configure the active learning pipeline. Active learning will label the data incrementally via several iterations. 
For every iteration, it will select a batch of data based on the sampling strategy. */ - deploymentResourcePool?: Schema$GoogleCloudAiplatformV1DeploymentResourcePool; + activeLearningConfig?: Schema$GoogleCloudAiplatformV1ActiveLearningConfig; /** - * Required. The ID to use for the DeploymentResourcePool, which will become the final component of the DeploymentResourcePool's resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61\}[a-z0-9])?$/`. + * Labels to assign to annotations generated by this DataLabelingJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - deploymentResourcePoolId?: string | null; - } - /** - * Runtime operation information for EndpointService.CreateEndpoint. - */ - export interface Schema$GoogleCloudAiplatformV1CreateEndpointOperationMetadata { + annotationLabels?: {[key: string]: string} | null; /** - * The operation generic information. + * Output only. Timestamp when this DataLabelingJob was created. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create EntityType. - */ - export interface Schema$GoogleCloudAiplatformV1CreateEntityTypeOperationMetadata { + createTime?: string | null; /** - * Operation metadata for EntityType. + * Output only. Estimated cost(in US dollars) that the DataLabelingJob has incurred to date. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create FeatureGroup. 
- */ - export interface Schema$GoogleCloudAiplatformV1CreateFeatureGroupOperationMetadata { + currentSpend?: Schema$GoogleTypeMoney; /** - * Operation metadata for FeatureGroup. + * Required. Dataset resource names. Right now we only support labeling from a single Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create FeatureOnlineStore. - */ - export interface Schema$GoogleCloudAiplatformV1CreateFeatureOnlineStoreOperationMetadata { + datasets?: string[] | null; /** - * Operation metadata for FeatureOnlineStore. + * Required. The user-defined name of the DataLabelingJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a DataLabelingJob. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create Feature. - */ - export interface Schema$GoogleCloudAiplatformV1CreateFeatureOperationMetadata { + displayName?: string | null; /** - * Operation metadata for Feature. + * Customer-managed encryption key spec for a DataLabelingJob. If set, this DataLabelingJob will be secured by this key. Note: Annotations created in the DataLabelingJob are associated with the EncryptionSpec of the Dataset they are exported to. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for FeaturestoreService.CreateFeature. Request message for FeatureRegistryService.CreateFeature. - */ - export interface Schema$GoogleCloudAiplatformV1CreateFeatureRequest { + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Required. The Feature to create. + * Output only. DataLabelingJob errors. It is only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. 
*/ - feature?: Schema$GoogleCloudAiplatformV1Feature; + error?: Schema$GoogleRpcStatus; /** - * Required. The ID to use for the Feature, which will become the final component of the Feature's resource name. This value may be up to 128 characters, and valid characters are `[a-z0-9_]`. The first character cannot be a number. The value must be unique within an EntityType/FeatureGroup. + * Required. Input config parameters for the DataLabelingJob. */ - featureId?: string | null; + inputs?: any | null; /** - * Required. The resource name of the EntityType or FeatureGroup to create a Feature. Format for entity_type as parent: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` Format for feature_group as parent: `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}` + * Required. Points to a YAML file stored on Google Cloud Storage describing the config for a specific type of DataLabelingJob. The schema files that can be used here are found in the https://storage.googleapis.com/google-cloud-aiplatform bucket in the /schema/datalabelingjob/inputs/ folder. */ - parent?: string | null; - } - /** - * Details of operations that perform create Featurestore. - */ - export interface Schema$GoogleCloudAiplatformV1CreateFeaturestoreOperationMetadata { - /** - * Operation metadata for Featurestore. - */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create FeatureView. - */ - export interface Schema$GoogleCloudAiplatformV1CreateFeatureViewOperationMetadata { + inputsSchemaUri?: string | null; /** - * Operation metadata for FeatureView Create. + * Required. The Google Cloud Storage location of the instruction pdf. This pdf is shared with labelers, and provides detailed description on how to label DataItems in Datasets. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Runtime operation information for IndexEndpointService.CreateIndexEndpoint. - */ - export interface Schema$GoogleCloudAiplatformV1CreateIndexEndpointOperationMetadata { + instructionUri?: string | null; /** - * The operation generic information. + * Required. Number of labelers to work on each DataItem. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Runtime operation information for IndexService.CreateIndex. - */ - export interface Schema$GoogleCloudAiplatformV1CreateIndexOperationMetadata { + labelerCount?: number | null; /** - * The operation generic information. + * Output only. Current labeling job progress percentage scaled in interval [0, 100], indicating the percentage of DataItems that has been finished. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + labelingProgress?: number | null; /** - * The operation metadata with regard to Matching Engine Index operation. + * The labels with user-defined metadata to organize your DataLabelingJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each DataLabelingJob: * "aiplatform.googleapis.com/schema": output only, its value is the inputs_schema's title. */ - nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; - } - /** - * Details of operations that perform MetadataService.CreateMetadataStore. 
- */ - export interface Schema$GoogleCloudAiplatformV1CreateMetadataStoreOperationMetadata { + labels?: {[key: string]: string} | null; /** - * Operation metadata for creating a MetadataStore. + * Output only. Resource name of the DataLabelingJob. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Metadata information for NotebookService.CreateNotebookRuntimeTemplate. - */ - export interface Schema$GoogleCloudAiplatformV1CreateNotebookRuntimeTemplateOperationMetadata { + name?: string | null; /** - * The operation generic information. + * The SpecialistPools' resource names associated with this job. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create PersistentResource. - */ - export interface Schema$GoogleCloudAiplatformV1CreatePersistentResourceOperationMetadata { + specialistPools?: string[] | null; /** - * Operation metadata for PersistentResource. + * Output only. The detailed state of the job. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + state?: string | null; /** - * Progress Message for Create LRO + * Output only. Timestamp when this DataLabelingJob was updated most recently. */ - progressMessage?: string | null; + updateTime?: string | null; } /** - * Request message for PipelineService.CreatePipelineJob. + * A collection of DataItems and Annotations on them. */ - export interface Schema$GoogleCloudAiplatformV1CreatePipelineJobRequest { + export interface Schema$GoogleCloudAiplatformV1Dataset { /** - * Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project\}/locations/{location\}` + * Output only. Timestamp when this Dataset was created. */ - parent?: string | null; + createTime?: string | null; /** - * Required. The PipelineJob to create. + * Output only. The number of DataItems in this Dataset. Only apply for non-structured Dataset. 
*/ - pipelineJob?: Schema$GoogleCloudAiplatformV1PipelineJob; + dataItemCount?: string | null; /** - * The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are `/a-z-/`. + * The description of the Dataset. */ - pipelineJobId?: string | null; - } - /** - * Details of operations that perform create FeatureGroup. - */ - export interface Schema$GoogleCloudAiplatformV1CreateRegistryFeatureOperationMetadata { + description?: string | null; /** - * Operation metadata for Feature. + * Required. The user-defined name of the Dataset. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Runtime operation information for SpecialistPoolService.CreateSpecialistPool. - */ - export interface Schema$GoogleCloudAiplatformV1CreateSpecialistPoolOperationMetadata { + displayName?: string | null; /** - * The operation generic information. + * Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform create Tensorboard. - */ - export interface Schema$GoogleCloudAiplatformV1CreateTensorboardOperationMetadata { + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Operation metadata for Tensorboard. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for TensorboardService.CreateTensorboardRun. - */ - export interface Schema$GoogleCloudAiplatformV1CreateTensorboardRunRequest { + etag?: string | null; /** - * Required. 
The resource name of the TensorboardExperiment to create the TensorboardRun in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` + * The labels with user-defined metadata to organize your Datasets. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value is the metadata_schema's title. */ - parent?: string | null; + labels?: {[key: string]: string} | null; /** - * Required. The TensorboardRun to create. + * Required. Additional information about the Dataset. */ - tensorboardRun?: Schema$GoogleCloudAiplatformV1TensorboardRun; + metadata?: any | null; /** - * Required. The ID to use for the Tensorboard run, which becomes the final component of the Tensorboard run's resource name. This value should be 1-128 characters, and valid characters are `/a-z-/`. + * Output only. The resource name of the Artifact that was created in MetadataStore when creating the Dataset. The Artifact resource name pattern is `projects/{project\}/locations/{location\}/metadataStores/{metadata_store\}/artifacts/{artifact\}`. */ - tensorboardRunId?: string | null; - } - /** - * Request message for TensorboardService.CreateTensorboardTimeSeries. - */ - export interface Schema$GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { + metadataArtifact?: string | null; /** - * Required. The resource name of the TensorboardRun to create the TensorboardTimeSeries in. 
Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` + * Required. Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. */ - parent?: string | null; + metadataSchemaUri?: string | null; /** - * Required. The TensorboardTimeSeries to create. + * Optional. Reference to the public base model last used by the dataset. Only set for prompt datasets. */ - tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1TensorboardTimeSeries; + modelReference?: string | null; /** - * Optional. The user specified unique ID to use for the TensorboardTimeSeries, which becomes the final component of the TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, 127\}" + * Output only. The resource name of the Dataset. */ - tensorboardTimeSeriesId?: string | null; - } - /** - * The storage details for CSV output content. - */ - export interface Schema$GoogleCloudAiplatformV1CsvDestination { + name?: string | null; /** - * Required. Google Cloud Storage location. + * All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will not be populated except for UI cases which will only use annotation_spec_count. In CreateDataset request, a SavedQuery is created together if this field is set, up to one SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; - } - /** - * The storage details for CSV input content. - */ - export interface Schema$GoogleCloudAiplatformV1CsvSource { + savedQueries?: Schema$GoogleCloudAiplatformV1SavedQuery[]; /** - * Required. Google Cloud Storage location. + * Output only. 
Timestamp when this Dataset was last updated. */ - gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; + updateTime?: string | null; } /** - * Represents a job that runs custom workloads such as a Docker container or a Python package. A CustomJob can have multiple worker pools and each worker pool can have its own machine and input spec. A CustomJob will be cleaned up once the job enters terminal state (failed or succeeded). + * Describes the dataset version. */ - export interface Schema$GoogleCloudAiplatformV1CustomJob { + export interface Schema$GoogleCloudAiplatformV1DatasetVersion { /** - * Output only. Time when the CustomJob was created. + * Output only. Name of the associated BigQuery dataset. + */ + bigQueryDatasetName?: string | null; + /** + * Output only. Timestamp when this DatasetVersion was created. */ createTime?: string | null; /** - * Required. The display name of the CustomJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The user-defined name of the DatasetVersion. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string | null; /** - * Customer-managed encryption key options for a CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + etag?: string | null; /** - * Output only. Time when the CustomJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + * Required. Output only. Additional information about the DatasetVersion. */ - endTime?: string | null; + metadata?: any | null; /** - * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * Output only. Reference to the public base model last used by the dataset version. 
Only set for prompt dataset versions. */ - error?: Schema$GoogleRpcStatus; + modelReference?: string | null; /** - * Required. Job spec. + * Output only. The resource name of the DatasetVersion. */ - jobSpec?: Schema$GoogleCloudAiplatformV1CustomJobSpec; + name?: string | null; /** - * The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Output only. Timestamp when this DatasetVersion was last updated. */ - labels?: {[key: string]: string} | null; + updateTime?: string | null; + } + /** + * A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. + */ + export interface Schema$GoogleCloudAiplatformV1DedicatedResources { /** - * Output only. Resource name of a CustomJob. + * Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. 
*/ - name?: string | null; + autoscalingMetricSpecs?: Schema$GoogleCloudAiplatformV1AutoscalingMetricSpec[]; /** - * Output only. Time when the CustomJob for the first time entered the `JOB_STATE_RUNNING` state. + * Required. Immutable. The specification of a single machine used by the prediction. */ - startTime?: string | null; + machineSpec?: Schema$GoogleCloudAiplatformV1MachineSpec; /** - * Output only. The detailed state of the job. + * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). */ - state?: string | null; + maxReplicaCount?: number | null; /** - * Output only. Time when the CustomJob was most recently updated. + * Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. */ - updateTime?: string | null; + minReplicaCount?: number | null; + } + /** + * Details of operations that delete Feature values. + */ + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesOperationMetadata { /** - * Output only. 
URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if job_spec.enable_web_access is `true`. The keys are names of each node in the training job; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + * Operation metadata for Featurestore delete Features values. */ - webAccessUris?: {[key: string]: string} | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Represents the spec of a CustomJob. + * Request message for FeaturestoreService.DeleteFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1CustomJobSpec { + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequest { /** - * The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` + * Select feature values to be deleted by specifying entities. */ - baseOutputDirectory?: Schema$GoogleCloudAiplatformV1GcsDestination; + selectEntity?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity; /** - * Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. 
If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + * Select feature values to be deleted by specifying time range and features. */ - enableDashboardAccess?: boolean | null; + selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature; + } + /** + * Message to select entity. If an entity id is selected, all the feature values corresponding to the entity id will be deleted, including the entityId. + */ + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity { /** - * Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + * Required. Selectors choosing feature values of which entity id to be deleted from the EntityType. */ - enableWebAccess?: boolean | null; + entityIdSelector?: Schema$GoogleCloudAiplatformV1EntityIdSelector; + } + /** + * Message to select time range and feature. Values of the selected feature generated within an inclusive time range will be deleted. Using this option permanently deletes the feature values from the specified feature IDs within the specified time range. This might include data from the online storage. If you want to retain any deleted historical data in the online storage, you must re-ingest it. + */ + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { /** - * Optional. The Experiment associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}` + * Required. Selectors choosing which feature values to be deleted from the EntityType. 
*/ - experiment?: string | null; - /** - * Optional. The Experiment Run associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}-{experiment-run-name\}` - */ - experimentRun?: string | null; - /** - * Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project\}/locations/{location\}/models/{model\}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. - */ - models?: string[] | null; - /** - * Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - */ - network?: string | null; - /** - * Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. 
The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - */ - persistentResourceId?: string | null; - /** - * The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations - */ - protectedArtifactLocationId?: string | null; - /** - * Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. - */ - reservedIpRanges?: string[] | null; + featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; /** - * Scheduling options for a CustomJob. + * If set, data will not be deleted from online storage. When time range is older than the data in online storage, setting this to be true will make the deletion have no impact on online serving. */ - scheduling?: Schema$GoogleCloudAiplatformV1Scheduling; + skipOnlineStorageDelete?: boolean | null; /** - * Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. + * Required. Select feature generated within a half-inclusive time range. The time range is lower inclusive and upper exclusive. */ - serviceAccount?: string | null; + timeRange?: Schema$GoogleTypeInterval; + } + /** + * Response message for FeaturestoreService.DeleteFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponse { /** - * Optional. 
The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` + * Response for request specifying the entities to delete */ - tensorboard?: string | null; + selectEntity?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity; /** - * Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + * Response for request specifying time range and feature */ - workerPoolSpecs?: Schema$GoogleCloudAiplatformV1WorkerPoolSpec[]; + selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature; } /** - * A piece of data in a Dataset. Could be an image, a video, a document or plain text. + * Response message if the request uses the SelectEntity option. */ - export interface Schema$GoogleCloudAiplatformV1DataItem { - /** - * Output only. Timestamp when this DataItem was created. - */ - createTime?: string | null; + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The count of deleted entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. */ - etag?: string | null; + offlineStorageDeletedEntityRowCount?: string | null; /** - * Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one DataItem(System labels are excluded). 
See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * The count of deleted entities in the online storage. Each entity ID corresponds to one entity. */ - labels?: {[key: string]: string} | null; + onlineStorageDeletedEntityCount?: string | null; + } + /** + * Response message if the request uses the SelectTimeRangeAndFeature option. + */ + export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { /** - * Output only. The resource name of the DataItem. + * The count of the features or columns impacted. This is the same as the feature count in the request. */ - name?: string | null; + impactedFeatureCount?: string | null; /** - * Required. The data that the DataItem represents (for example, an image or a text snippet). The schema of the payload is stored in the parent Dataset's metadata schema's dataItemSchemaUri field. + * The count of modified entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. Within each row, only the features specified in the request are deleted. */ - payload?: any | null; + offlineStorageModifiedEntityRowCount?: string | null; /** - * Output only. Timestamp when this DataItem was last updated. + * The count of modified entities in the online storage. Each entity ID corresponds to one entity. Within each entity, only the features specified in the request are deleted. */ - updateTime?: string | null; + onlineStorageModifiedEntityCount?: string | null; } /** - * A container for a single DataItem and Annotations on it. + * Details of operations that perform MetadataService.DeleteMetadataStore. */ - export interface Schema$GoogleCloudAiplatformV1DataItemView { - /** - * The Annotations on the DataItem. 
If too many Annotations should be returned for the DataItem, this field will be truncated per annotations_limit in request. If it was, then the has_truncated_annotations will be set to true. - */ - annotations?: Schema$GoogleCloudAiplatformV1Annotation[]; + export interface Schema$GoogleCloudAiplatformV1DeleteMetadataStoreOperationMetadata { /** - * The DataItem. + * Operation metadata for deleting a MetadataStore. */ - dataItem?: Schema$GoogleCloudAiplatformV1DataItem; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Details of operations that perform deletes of any entities. + */ + export interface Schema$GoogleCloudAiplatformV1DeleteOperationMetadata { /** - * True if and only if the Annotations field has been truncated. It happens if more Annotations for this DataItem met the request's annotation_filter than are allowed to be returned by annotations_limit. Note that if Annotations field is not being returned due to field mask, then this field will not be set to true no matter how many Annotations are there. + * The common part of the operation metadata. */ - hasTruncatedAnnotations?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * DataLabelingJob is used to trigger a human labeling job on unlabeled data from the following Dataset: + * A deployment of an Index. IndexEndpoints contain one or more DeployedIndexes. */ - export interface Schema$GoogleCloudAiplatformV1DataLabelingJob { + export interface Schema$GoogleCloudAiplatformV1DeployedIndex { /** - * Parameters that configure the active learning pipeline. Active learning will label the data incrementally via several iterations. For every iteration, it will select a batch of data based on the sampling strategy. + * Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. 
If min_replica_count is not set, the default value is 2 (we don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. */ - activeLearningConfig?: Schema$GoogleCloudAiplatformV1ActiveLearningConfig; + automaticResources?: Schema$GoogleCloudAiplatformV1AutomaticResources; /** - * Labels to assign to annotations generated by this DataLabelingJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Output only. Timestamp when the DeployedIndex was created. */ - annotationLabels?: {[key: string]: string} | null; + createTime?: string | null; /** - * Output only. Timestamp when this DataLabelingJob was created. + * Optional. A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. The field min_replica_count must be set to a value strictly greater than 0, or else validation will fail. We don't provide SLA when min_replica_count=1. If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency. */ - createTime?: string | null; + dedicatedResources?: Schema$GoogleCloudAiplatformV1DedicatedResources; /** - * Output only. 
Estimated cost(in US dollars) that the DataLabelingJob has incurred to date. + * Optional. If set, the authentication is enabled for the private endpoint. */ - currentSpend?: Schema$GoogleTypeMoney; + deployedIndexAuthConfig?: Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfig; /** - * Required. Dataset resource names. Right now we only support labeling from a single Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}` + * Optional. The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. Creating `deployment_groups` with `reserved_ip_ranges` is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. Note: we only support up to 5 deployment groups(not including 'default'). */ - datasets?: string[] | null; + deploymentGroup?: string | null; /** - * Required. The user-defined name of the DataLabelingJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a DataLabelingJob. + * The display name of the DeployedIndex. If not provided upon creation, the Index's display_name is used. */ displayName?: string | null; /** - * Customer-managed encryption key spec for a DataLabelingJob. If set, this DataLabelingJob will be secured by this key. Note: Annotations created in the DataLabelingJob are associated with the EncryptionSpec of the Dataset they are exported to. + * Optional. If true, private endpoint's access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each MatchRequest. 
Note that logs may incur a cost, especially if the deployed index receives a high queries per second rate (QPS). Estimate your costs before enabling this option. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + enableAccessLogging?: boolean | null; /** - * Output only. DataLabelingJob errors. It is only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * Required. The user specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. */ - error?: Schema$GoogleRpcStatus; + id?: string | null; /** - * Required. Input config parameters for the DataLabelingJob. + * Required. The name of the Index this is the deployment of. We may refer to this Index as the DeployedIndex's "original" Index. */ - inputs?: any | null; + index?: string | null; /** - * Required. Points to a YAML file stored on Google Cloud Storage describing the config for a specific type of DataLabelingJob. The schema files that can be used here are found in the https://storage.googleapis.com/google-cloud-aiplatform bucket in the /schema/datalabelingjob/inputs/ folder. + * Output only. The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the Index.update_time of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must list the operations that are running on the original Index. 
Only the successfully completed Operations with update_time equal or before this sync time are contained in this DeployedIndex. */ - inputsSchemaUri?: string | null; + indexSyncTime?: string | null; /** - * Required. The Google Cloud Storage location of the instruction pdf. This pdf is shared with labelers, and provides detailed description on how to label DataItems in Datasets. + * Output only. Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if network is configured. */ - instructionUri?: string | null; + privateEndpoints?: Schema$GoogleCloudAiplatformV1IndexPrivateEndpoints; /** - * Required. Number of labelers to work on each DataItem. + * Optional. A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. */ - labelerCount?: number | null; + reservedIpRanges?: string[] | null; + } + /** + * Used to set up the auth on the DeployedIndex's private endpoint. + */ + export interface Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfig { /** - * Output only. Current labeling job progress percentage scaled in interval [0, 100], indicating the percentage of DataItems that has been finished. + * Defines the authentication provider that the DeployedIndex uses. 
*/ - labelingProgress?: number | null; + authProvider?: Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider; + } + /** + * Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). + */ + export interface Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider { /** - * The labels with user-defined metadata to organize your DataLabelingJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each DataLabelingJob: * "aiplatform.googleapis.com/schema": output only, its value is the inputs_schema's title. + * A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: `service-account-name@project-id.iam.gserviceaccount.com` */ - labels?: {[key: string]: string} | null; + allowedIssuers?: string[] | null; /** - * Output only. Resource name of the DataLabelingJob. + * The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. */ - name?: string | null; + audiences?: string[] | null; + } + /** + * Points to a DeployedIndex. + */ + export interface Schema$GoogleCloudAiplatformV1DeployedIndexRef { /** - * The SpecialistPools' resource names associated with this job. + * Immutable. The ID of the DeployedIndex in the above IndexEndpoint. */ - specialistPools?: string[] | null; + deployedIndexId?: string | null; /** - * Output only. The detailed state of the job. + * Output only. The display name of the DeployedIndex. 
*/ - state?: string | null; + displayName?: string | null; /** - * Output only. Timestamp when this DataLabelingJob was updated most recently. + * Immutable. A resource name of the IndexEndpoint. */ - updateTime?: string | null; + indexEndpoint?: string | null; } /** - * A collection of DataItems and Annotations on them. + * A deployment of a Model. Endpoints contain one or more DeployedModels. */ - export interface Schema$GoogleCloudAiplatformV1Dataset { + export interface Schema$GoogleCloudAiplatformV1DeployedModel { /** - * Output only. Timestamp when this Dataset was created. + * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. */ - createTime?: string | null; + automaticResources?: Schema$GoogleCloudAiplatformV1AutomaticResources; /** - * Output only. The number of DataItems in this Dataset. Only apply for non-structured Dataset. + * Output only. Timestamp when the DeployedModel was created. */ - dataItemCount?: string | null; + createTime?: string | null; /** - * The description of the Dataset. + * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. */ - description?: string | null; + dedicatedResources?: Schema$GoogleCloudAiplatformV1DedicatedResources; /** - * Required. The user-defined name of the Dataset. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. */ - displayName?: string | null; + disableContainerLogging?: boolean | null; /** - * Customer-managed encryption key spec for a Dataset. 
If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + * If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + disableExplanations?: boolean | null; /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. */ - etag?: string | null; + displayName?: string | null; /** - * The labels with user-defined metadata to organize your Datasets. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value is the metadata_schema's title. + * If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. */ - labels?: {[key: string]: string} | null; + enableAccessLogging?: boolean | null; /** - * Required. Additional information about the Dataset. + * Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. 
All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. */ - metadata?: any | null; + explanationSpec?: Schema$GoogleCloudAiplatformV1ExplanationSpec; /** - * Output only. The resource name of the Artifact that was created in MetadataStore when creating the Dataset. The Artifact resource name pattern is `projects/{project\}/locations/{location\}/metadataStores/{metadata_store\}/artifacts/{artifact\}`. + * Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are `/[0-9]/`. */ - metadataArtifact?: string | null; + id?: string | null; /** - * Required. Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. + * Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. */ - metadataSchemaUri?: string | null; + model?: string | null; /** - * Optional. Reference to the public base model last used by the dataset. Only set for prompt datasets. + * Output only. The version ID of the model that is deployed. */ - modelReference?: string | null; + modelVersionId?: string | null; /** - * Output only. 
The resource name of the Dataset. + * Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured. */ - name?: string | null; + privateEndpoints?: Schema$GoogleCloudAiplatformV1PrivateEndpoints; /** - * All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will not be populated except for UI cases which will only use annotation_spec_count. In CreateDataset request, a SavedQuery is created together if this field is set, up to one SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec. + * The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. */ - savedQueries?: Schema$GoogleCloudAiplatformV1SavedQuery[]; + serviceAccount?: string | null; /** - * Output only. Timestamp when this Dataset was last updated. + * The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` */ - updateTime?: string | null; + sharedResources?: string | null; } /** - * Describes the dataset version. + * Points to a DeployedModel. */ - export interface Schema$GoogleCloudAiplatformV1DatasetVersion { + export interface Schema$GoogleCloudAiplatformV1DeployedModelRef { /** - * Output only. Name of the associated BigQuery dataset. + * Immutable. An ID of a DeployedModel in the above Endpoint. */ - bigQueryDatasetName?: string | null; - /** - * Output only. Timestamp when this DatasetVersion was created. 
- */ - createTime?: string | null; - /** - * The user-defined name of the DatasetVersion. The name can be up to 128 characters long and can consist of any UTF-8 characters. - */ - displayName?: string | null; - /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; + deployedModelId?: string | null; /** - * Required. Output only. Additional information about the DatasetVersion. + * Immutable. A resource name of an Endpoint. */ - metadata?: any | null; + endpoint?: string | null; + } + /** + * Runtime operation information for IndexEndpointService.DeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1DeployIndexOperationMetadata { /** - * Output only. Reference to the public base model last used by the dataset version. Only set for prompt dataset versions. + * The unique index id specified by user */ - modelReference?: string | null; + deployedIndexId?: string | null; /** - * Output only. The resource name of the DatasetVersion. + * The operation generic information. */ - name?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Request message for IndexEndpointService.DeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1DeployIndexRequest { /** - * Output only. Timestamp when this DatasetVersion was last updated. + * Required. The DeployedIndex to be created within the IndexEndpoint. */ - updateTime?: string | null; + deployedIndex?: Schema$GoogleCloudAiplatformV1DeployedIndex; } /** - * A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. + * Response message for IndexEndpointService.DeployIndex. */ - export interface Schema$GoogleCloudAiplatformV1DedicatedResources { + export interface Schema$GoogleCloudAiplatformV1DeployIndexResponse { /** - * Immutable. 
The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. + * The DeployedIndex that had been deployed in the IndexEndpoint. */ - autoscalingMetricSpecs?: Schema$GoogleCloudAiplatformV1AutoscalingMetricSpec[]; + deployedIndex?: Schema$GoogleCloudAiplatformV1DeployedIndex; + } + /** + * A description of resources that can be shared by multiple DeployedModels, whose underlying specification consists of a DedicatedResources. + */ + export interface Schema$GoogleCloudAiplatformV1DeploymentResourcePool { /** - * Required. Immutable. The specification of a single machine used by the prediction. + * Output only. Timestamp when this DeploymentResourcePool was created. */ - machineSpec?: Schema$GoogleCloudAiplatformV1MachineSpec; + createTime?: string | null; /** - * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). 
If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + * Required. The underlying DedicatedResources that the DeploymentResourcePool uses. */ - maxReplicaCount?: number | null; + dedicatedResources?: Schema$GoogleCloudAiplatformV1DedicatedResources; /** - * Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. + * Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` */ - minReplicaCount?: number | null; + name?: string | null; } /** - * Details of operations that delete Feature values. + * Runtime operation information for EndpointService.DeployModel. */ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1DeployModelOperationMetadata { /** - * Operation metadata for Featurestore delete Features values. + * The operation generic information. */ genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Request message for FeaturestoreService.DeleteFeatureValues. + * Request message for EndpointService.DeployModel. 
*/ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1DeployModelRequest { /** - * Select feature values to be deleted by specifying entities. + * Required. The DeployedModel to be created within the Endpoint. Note that Endpoint.traffic_split must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via EndpointService.UpdateEndpoint. */ - selectEntity?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity; + deployedModel?: Schema$GoogleCloudAiplatformV1DeployedModel; /** - * Select feature values to be deleted by specifying time range and features. + * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's traffic_split will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's traffic_split is not updated. */ - selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature; + trafficSplit?: {[key: string]: number} | null; } /** - * Message to select entity. If an entity id is selected, all the feature values corresponding to the entity id will be deleted, including the entityId. + * Response message for EndpointService.DeployModel. */ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity { + export interface Schema$GoogleCloudAiplatformV1DeployModelResponse { /** - * Required. Selectors choosing feature values of which entity id to be deleted from the EntityType. + * The DeployedModel that had been deployed in the Endpoint. 
*/ - entityIdSelector?: Schema$GoogleCloudAiplatformV1EntityIdSelector; + deployedModel?: Schema$GoogleCloudAiplatformV1DeployedModel; } - /** - * Message to select time range and feature. Values of the selected feature generated within an inclusive time range will be deleted. Using this option permanently deletes the feature values from the specified feature IDs within the specified time range. This might include data from the online storage. If you want to retain any deleted historical data in the online storage, you must re-ingest it. - */ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { + export interface Schema$GoogleCloudAiplatformV1DestinationFeatureSetting { /** - * Required. Selectors choosing which feature values to be deleted from the EntityType. + * Specify the field name in the export destination. If not specified, Feature ID is used. */ - featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; + destinationField?: string | null; /** - * If set, data will not be deleted from online storage. When time range is older than the data in online storage, setting this to be true will make the deletion have no impact on online serving. + * Required. The ID of the Feature to apply the setting to. */ - skipOnlineStorageDelete?: boolean | null; + featureId?: string | null; + } + /** + * Request message for PredictionService.DirectPredict. + */ + export interface Schema$GoogleCloudAiplatformV1DirectPredictRequest { /** - * Required. Select feature generated within a half-inclusive time range. The time range is lower inclusive and upper exclusive. + * The prediction input. */ - timeRange?: Schema$GoogleTypeInterval; + inputs?: Schema$GoogleCloudAiplatformV1Tensor[]; + /** + * The parameters that govern the prediction. + */ + parameters?: Schema$GoogleCloudAiplatformV1Tensor; } /** - * Response message for FeaturestoreService.DeleteFeatureValues. + * Response message for PredictionService.DirectPredict. 
*/ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponse { + export interface Schema$GoogleCloudAiplatformV1DirectPredictResponse { /** - * Response for request specifying the entities to delete + * The prediction output. */ - selectEntity?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity; + outputs?: Schema$GoogleCloudAiplatformV1Tensor[]; /** - * Response for request specifying time range and feature + * The parameters that govern the prediction. */ - selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature; + parameters?: Schema$GoogleCloudAiplatformV1Tensor; } /** - * Response message if the request uses the SelectEntity option. + * Request message for PredictionService.DirectRawPredict. */ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { + export interface Schema$GoogleCloudAiplatformV1DirectRawPredictRequest { /** - * The count of deleted entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. + * The prediction input. */ - offlineStorageDeletedEntityRowCount?: string | null; + input?: string | null; /** - * The count of deleted entities in the online storage. Each entity ID corresponds to one entity. + * Fully qualified name of the API method being invoked to perform predictions. Format: `/namespace.Service/Method/` Example: `/tensorflow.serving.PredictionService/Predict` */ - onlineStorageDeletedEntityCount?: string | null; + methodName?: string | null; } /** - * Response message if the request uses the SelectTimeRangeAndFeature option. + * Response message for PredictionService.DirectRawPredict. 
*/ - export interface Schema$GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { + export interface Schema$GoogleCloudAiplatformV1DirectRawPredictResponse { /** - * The count of the features or columns impacted. This is the same as the feature count in the request. + * The prediction output. */ - impactedFeatureCount?: string | null; + output?: string | null; + } + /** + * Represents the spec of disk options. + */ + export interface Schema$GoogleCloudAiplatformV1DiskSpec { /** - * The count of modified entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. Within each row, only the features specified in the request are deleted. + * Size in GB of the boot disk (default is 100GB). */ - offlineStorageModifiedEntityRowCount?: string | null; + bootDiskSizeGb?: number | null; /** - * The count of modified entities in the online storage. Each entity ID corresponds to one entity. Within each entity, only the features specified in the request are deleted. + * Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). */ - onlineStorageModifiedEntityCount?: string | null; + bootDiskType?: string | null; } /** - * Details of operations that perform MetadataService.DeleteMetadataStore. + * A list of double values. */ - export interface Schema$GoogleCloudAiplatformV1DeleteMetadataStoreOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1DoubleArray { /** - * Operation metadata for deleting a MetadataStore. + * A list of double values. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + values?: number[] | null; } /** - * Details of operations that perform deletes of any entities. + * Represents a customer-managed encryption key spec that can be applied to a top-level resource. 
*/ - export interface Schema$GoogleCloudAiplatformV1DeleteOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1EncryptionSpec { /** - * The common part of the operation metadata. + * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + kmsKeyName?: string | null; } /** - * A deployment of an Index. IndexEndpoints contain one or more DeployedIndexes. + * Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. */ - export interface Schema$GoogleCloudAiplatformV1DeployedIndex { + export interface Schema$GoogleCloudAiplatformV1Endpoint { /** - * Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2 (we don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. + * Output only. Timestamp when this Endpoint was created. */ - automaticResources?: Schema$GoogleCloudAiplatformV1AutomaticResources; + createTime?: string | null; /** - * Output only. Timestamp when the DeployedIndex was created. + * Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. */ - createTime?: string | null; + deployedModels?: Schema$GoogleCloudAiplatformV1DeployedModel[]; /** - * Optional. A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. 
The field min_replica_count must be set to a value strictly greater than 0, or else validation will fail. We don't provide SLA when min_replica_count=1. If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency. + * The description of the Endpoint. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1DedicatedResources; + description?: string | null; /** - * Optional. If set, the authentication is enabled for the private endpoint. + * Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - deployedIndexAuthConfig?: Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfig; + displayName?: string | null; /** - * Optional. The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. Creating `deployment_groups` with `reserved_ip_ranges` is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. Note: we only support up to 5 deployment groups(not including 'default'). + * Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. 
*/ - deploymentGroup?: string | null; + enablePrivateServiceConnect?: boolean | null; /** - * The display name of the DeployedIndex. If not provided upon creation, the Index's display_name is used. + * Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. */ - displayName?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Optional. If true, private endpoint's access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each MatchRequest. Note that logs may incur a cost, especially if the deployed index receives a high queries per second rate (QPS). Estimate your costs before enabling this option. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - enableAccessLogging?: boolean | null; + etag?: string | null; /** - * Required. The user specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. + * The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - id?: string | null; + labels?: {[key: string]: string} | null; /** - * Required. The name of the Index this is the deployment of. We may refer to this Index as the DeployedIndex's "original" Index. + * Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. 
Format: `projects/{project\}/locations/{location\}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job\}` */ - index?: string | null; + modelDeploymentMonitoringJob?: string | null; /** - * Output only. The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the Index.update_time of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must list the operations that are running on the original Index. Only the successfully completed Operations with update_time equal or before this sync time are contained in this DeployedIndex. + * Output only. The resource name of the Endpoint. */ - indexSyncTime?: string | null; + name?: string | null; /** - * Output only. Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if network is configured. + * Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project\}/global/networks/{network\}`. Where `{project\}` is a project number, as in `12345`, and `{network\}` is network name. 
*/ - privateEndpoints?: Schema$GoogleCloudAiplatformV1IndexPrivateEndpoints; + network?: string | null; /** - * Optional. A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. + * Configures the request-response logging for online prediction. */ - reservedIpRanges?: string[] | null; - } - /** - * Used to set up the auth on the DeployedIndex's private endpoint. - */ - export interface Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfig { + predictRequestResponseLoggingConfig?: Schema$GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig; /** - * Defines the authentication provider that the DeployedIndex uses. + * Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive. */ - authProvider?: Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider; - } - /** - * Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). - */ - export interface Schema$GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider { + privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** - * A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: `service-account-name@project-id.iam.gserviceaccount.com` + * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. 
If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. */ - allowedIssuers?: string[] | null; + trafficSplit?: {[key: string]: number} | null; /** - * The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. + * Output only. Timestamp when this Endpoint was last updated. */ - audiences?: string[] | null; + updateTime?: string | null; } /** - * Points to a DeployedIndex. + * Selector for entityId. Getting ids from the given source. */ - export interface Schema$GoogleCloudAiplatformV1DeployedIndexRef { + export interface Schema$GoogleCloudAiplatformV1EntityIdSelector { /** - * Immutable. The ID of the DeployedIndex in the above IndexEndpoint. - */ - deployedIndexId?: string | null; - /** - * Output only. The display name of the DeployedIndex. + * Source of Csv */ - displayName?: string | null; + csvSource?: Schema$GoogleCloudAiplatformV1CsvSource; /** - * Immutable. A resource name of the IndexEndpoint. + * Source column that holds entity IDs. If not provided, entity IDs are extracted from the column named entity_id. */ - indexEndpoint?: string | null; + entityIdField?: string | null; } /** - * A deployment of a Model. Endpoints contain one or more DeployedModels. + * An entity type is a type of object in a system that needs to be modeled and have stored information about. For example, driver is an entity type, and driver0 is an instance of an entity type driver. */ - export interface Schema$GoogleCloudAiplatformV1DeployedModel { + export interface Schema$GoogleCloudAiplatformV1EntityType { /** - * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. + * Output only. Timestamp when this EntityType was created. 
*/ - automaticResources?: Schema$GoogleCloudAiplatformV1AutomaticResources; + createTime?: string | null; /** - * Output only. Timestamp when the DeployedModel was created. + * Optional. Description of the EntityType. */ - createTime?: string | null; + description?: string | null; /** - * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. + * Optional. Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1DedicatedResources; + etag?: string | null; /** - * For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + * Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one EntityType (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - disableContainerLogging?: boolean | null; + labels?: {[key: string]: string} | null; /** - * If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. + * Optional. The default monitoring configuration for all Features with value type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 under this EntityType. 
If this is populated with [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is disabled. */ - disableExplanations?: boolean | null; + monitoringConfig?: Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfig; /** - * The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. + * Immutable. Name of the EntityType. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. */ - displayName?: string | null; + name?: string | null; /** - * If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. + * Optional. Config for data retention policy in offline storage. TTL in days for feature values that will be stored in offline storage. The Feature Store offline storage periodically removes obsolete feature values older than `offline_storage_ttl_days` since the feature generation time. If unset (or explicitly set to 0), default to 4000 days TTL. */ - enableAccessLogging?: boolean | null; + offlineStorageTtlDays?: number | null; /** - * Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. 
If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. + * Output only. Timestamp when this EntityType was most recently updated. */ - explanationSpec?: Schema$GoogleCloudAiplatformV1ExplanationSpec; + updateTime?: string | null; + } + /** + * Represents an environment variable present in a Container or Python Module. + */ + export interface Schema$GoogleCloudAiplatformV1EnvVar { /** - * Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are `/[0-9]/`. + * Required. Name of the environment variable. Must be a valid C identifier. */ - id?: string | null; + name?: string | null; /** - * Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. + * Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. */ - model?: string | null; + value?: string | null; + } + /** + * Model error analysis for each annotation. 
+ */ + export interface Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotation { /** - * Output only. The version ID of the model that is deployed. + * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. */ - modelVersionId?: string | null; + attributedItems?: Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem[]; /** - * Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured. + * The outlier score of this annotated item. Usually defined as the min of all distances from attributed items. */ - privateEndpoints?: Schema$GoogleCloudAiplatformV1PrivateEndpoints; + outlierScore?: number | null; /** - * The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + * The threshold used to determine if this annotation is an outlier or not. */ - serviceAccount?: string | null; + outlierThreshold?: number | null; /** - * The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` + * The query type used for finding the attributed items. */ - sharedResources?: string | null; + queryType?: string | null; } /** - * Points to a DeployedModel. + * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. 
*/ - export interface Schema$GoogleCloudAiplatformV1DeployedModelRef { + export interface Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem { /** - * Immutable. An ID of a DeployedModel in the above Endpoint. + * The unique ID for each annotation. Used by FE to allocate the annotation in DB. */ - deployedModelId?: string | null; + annotationResourceName?: string | null; /** - * Immutable. A resource name of an Endpoint. + * The distance of this item to the annotation. */ - endpoint?: string | null; + distance?: number | null; } /** - * Runtime operation information for IndexEndpointService.DeployIndex. + * True positive, false positive, or false negative. EvaluatedAnnotation is only available under ModelEvaluationSlice with slice of `annotationSpec` dimension. */ - export interface Schema$GoogleCloudAiplatformV1DeployIndexOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1EvaluatedAnnotation { /** - * The unique index id specified by user + * Output only. The data item payload that the Model predicted this EvaluatedAnnotation on. */ - deployedIndexId?: string | null; + dataItemPayload?: any | null; /** - * The operation generic information. + * Annotations of model error analysis results. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for IndexEndpointService.DeployIndex. - */ - export interface Schema$GoogleCloudAiplatformV1DeployIndexRequest { + errorAnalysisAnnotations?: Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotation[]; /** - * Required. The DeployedIndex to be created within the IndexEndpoint. + * Output only. ID of the EvaluatedDataItemView under the same ancestor ModelEvaluation. The EvaluatedDataItemView consists of all ground truths and predictions on data_item_payload. */ - deployedIndex?: Schema$GoogleCloudAiplatformV1DeployedIndex; - } - /** - * Response message for IndexEndpointService.DeployIndex. 
- */ - export interface Schema$GoogleCloudAiplatformV1DeployIndexResponse { + evaluatedDataItemViewId?: string | null; /** - * The DeployedIndex that had been deployed in the IndexEndpoint. + * Explanations of predictions. Each element of the explanations indicates the explanation for one explanation Method. The attributions list in the EvaluatedAnnotationExplanation.explanation object corresponds to the predictions list. For example, the second element in the attributions list explains the second element in the predictions list. */ - deployedIndex?: Schema$GoogleCloudAiplatformV1DeployedIndex; - } - /** - * A description of resources that can be shared by multiple DeployedModels, whose underlying specification consists of a DedicatedResources. - */ - export interface Schema$GoogleCloudAiplatformV1DeploymentResourcePool { + explanations?: Schema$GoogleCloudAiplatformV1EvaluatedAnnotationExplanation[]; /** - * Output only. Timestamp when this DeploymentResourcePool was created. + * Output only. The ground truth Annotations, i.e. the Annotations that exist in the test data the Model is evaluated on. For true positive, there is one and only one ground truth annotation, which matches the only prediction in predictions. For false positive, there are zero or more ground truth annotations that are similar to the only prediction in predictions, but not enough for a match. For false negative, there is one and only one ground truth annotation, which doesn't match any predictions created by the model. The schema of the ground truth is stored in ModelEvaluation.annotation_schema_uri */ - createTime?: string | null; + groundTruths?: any[] | null; /** - * Required. The underlying DedicatedResources that the DeploymentResourcePool uses. + * Output only. The model predicted annotations. For true positive, there is one and only one prediction, which matches the only one ground truth annotation in ground_truths. 
For false positive, there is one and only one prediction, which doesn't match any ground truth annotation of the corresponding data_item_view_id. For false negative, there are zero or more predictions which are similar to the only ground truth annotation in ground_truths but not enough for a match. The schema of the prediction is stored in ModelEvaluation.annotation_schema_uri */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1DedicatedResources; + predictions?: any[] | null; /** - * Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` + * Output only. Type of the EvaluatedAnnotation. */ - name?: string | null; + type?: string | null; } /** - * Runtime operation information for EndpointService.DeployModel. + * Explanation result of the prediction produced by the Model. */ - export interface Schema$GoogleCloudAiplatformV1DeployModelOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1EvaluatedAnnotationExplanation { /** - * The operation generic information. + * Explanation attribution response details. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + explanation?: Schema$GoogleCloudAiplatformV1Explanation; + /** + * Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` + */ + explanationType?: string | null; } /** - * Request message for EndpointService.DeployModel. + * An edge describing the relationship between an Artifact and an Execution in a lineage graph. */ - export interface Schema$GoogleCloudAiplatformV1DeployModelRequest { + export interface Schema$GoogleCloudAiplatformV1Event { /** - * Required. The DeployedModel to be created within the Endpoint. Note that Endpoint.traffic_split must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via EndpointService.UpdateEndpoint. 
+ * Required. The relative resource name of the Artifact in the Event. */ - deployedModel?: Schema$GoogleCloudAiplatformV1DeployedModel; + artifact?: string | null; /** - * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's traffic_split will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's traffic_split is not updated. + * Output only. Time the Event occurred. */ - trafficSplit?: {[key: string]: number} | null; - } - /** - * Response message for EndpointService.DeployModel. - */ - export interface Schema$GoogleCloudAiplatformV1DeployModelResponse { + eventTime?: string | null; /** - * The DeployedModel that had been deployed in the Endpoint. + * Output only. The relative resource name of the Execution in the Event. */ - deployedModel?: Schema$GoogleCloudAiplatformV1DeployedModel; - } - export interface Schema$GoogleCloudAiplatformV1DestinationFeatureSetting { + execution?: string | null; /** - * Specify the field name in the export destination. If not specified, Feature ID is used. + * The labels with user-defined metadata to annotate Events. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Event (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - destinationField?: string | null; + labels?: {[key: string]: string} | null; /** - * Required. 
The ID of the Feature to apply the setting to. + * Required. The type of the Event. */ - featureId?: string | null; + type?: string | null; } /** - * Request message for PredictionService.DirectPredict. + * Example-based explainability that returns the nearest neighbors from the provided dataset. */ - export interface Schema$GoogleCloudAiplatformV1DirectPredictRequest { + export interface Schema$GoogleCloudAiplatformV1Examples { /** - * The prediction input. + * The Cloud Storage input instances. */ - inputs?: Schema$GoogleCloudAiplatformV1Tensor[]; + exampleGcsSource?: Schema$GoogleCloudAiplatformV1ExamplesExampleGcsSource; /** - * The parameters that govern the prediction. + * The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). */ - parameters?: Schema$GoogleCloudAiplatformV1Tensor; - } - /** - * Response message for PredictionService.DirectPredict. - */ - export interface Schema$GoogleCloudAiplatformV1DirectPredictResponse { + nearestNeighborSearchConfig?: any | null; /** - * The prediction output. + * The number of neighbors to return when querying for examples. */ - outputs?: Schema$GoogleCloudAiplatformV1Tensor[]; + neighborCount?: number | null; /** - * The parameters that govern the prediction. + * Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality. */ - parameters?: Schema$GoogleCloudAiplatformV1Tensor; + presets?: Schema$GoogleCloudAiplatformV1Presets; } /** - * Request message for PredictionService.DirectRawPredict. + * The Cloud Storage input instances. */ - export interface Schema$GoogleCloudAiplatformV1DirectRawPredictRequest { + export interface Schema$GoogleCloudAiplatformV1ExamplesExampleGcsSource { /** - * The prediction input. 
+ * The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. */ - input?: string | null; + dataFormat?: string | null; /** - * Fully qualified name of the API method being invoked to perform predictions. Format: `/namespace.Service/Method/` Example: `/tensorflow.serving.PredictionService/Predict` + * The Cloud Storage location for the input instances. */ - methodName?: string | null; + gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; } /** - * Response message for PredictionService.DirectRawPredict. + * Overrides for example-based explanations. */ - export interface Schema$GoogleCloudAiplatformV1DirectRawPredictResponse { + export interface Schema$GoogleCloudAiplatformV1ExamplesOverride { /** - * The prediction output. + * The number of neighbors to return that have the same crowding tag. */ - output?: string | null; - } - /** - * Represents the spec of disk options. - */ - export interface Schema$GoogleCloudAiplatformV1DiskSpec { + crowdingCount?: number | null; /** - * Size in GB of the boot disk (default is 100GB). + * The format of the data being provided with each call. */ - bootDiskSizeGb?: number | null; + dataFormat?: string | null; /** - * Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + * The number of neighbors to return. */ - bootDiskType?: string | null; - } - /** - * A list of double values. - */ - export interface Schema$GoogleCloudAiplatformV1DoubleArray { + neighborCount?: number | null; /** - * A list of double values. + * Restrict the resulting nearest neighbors to respect these constraints. */ - values?: number[] | null; + restrictions?: Schema$GoogleCloudAiplatformV1ExamplesRestrictionsNamespace[]; + /** + * If true, return the embeddings instead of neighbors. 
+ */ + returnEmbeddings?: boolean | null; } /** - * Represents a customer-managed encryption key spec that can be applied to a top-level resource. + * Restrictions namespace for example-based explanations overrides. */ - export interface Schema$GoogleCloudAiplatformV1EncryptionSpec { + export interface Schema$GoogleCloudAiplatformV1ExamplesRestrictionsNamespace { /** - * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + * The list of allowed tags. */ - kmsKeyName?: string | null; + allow?: string[] | null; + /** + * The list of deny tags. + */ + deny?: string[] | null; + /** + * The namespace name. + */ + namespaceName?: string | null; } /** - * Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + * Instance of a general execution. */ - export interface Schema$GoogleCloudAiplatformV1Endpoint { + export interface Schema$GoogleCloudAiplatformV1Execution { /** - * Output only. Timestamp when this Endpoint was created. + * Output only. Timestamp when this Execution was created. */ createTime?: string | null; /** - * Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. - */ - deployedModels?: Schema$GoogleCloudAiplatformV1DeployedModel[]; - /** - * The description of the Endpoint. + * Description of the Execution */ description?: string | null; /** - * Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * User provided display name of the Execution. May be up to 128 Unicode characters. */ displayName?: string | null; /** - * Deprecated: If true, expose the Endpoint via private service connect. 
Only one of the fields, network or enable_private_service_connect, can be set. - */ - enablePrivateServiceConnect?: boolean | null; - /** - * Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. - */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; - /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ etag?: string | null; /** - * The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). */ labels?: {[key: string]: string} | null; /** - * Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project\}/locations/{location\}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job\}` + * Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. */ - modelDeploymentMonitoringJob?: string | null; + metadata?: {[key: string]: any} | null; /** - * Output only. The resource name of the Endpoint. + * Output only. The resource name of the Execution. 
*/ name?: string | null; /** - * Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project\}/global/networks/{network\}`. Where `{project\}` is a project number, as in `12345`, and `{network\}` is network name. - */ - network?: string | null; - /** - * Configures the request-response logging for online prediction. + * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - predictRequestResponseLoggingConfig?: Schema$GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig; + schemaTitle?: string | null; /** - * Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive. + * The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1PrivateServiceConnectConfig; + schemaVersion?: string | null; /** - * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. 
+ * The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. */ - trafficSplit?: {[key: string]: number} | null; + state?: string | null; /** - * Output only. Timestamp when this Endpoint was last updated. + * Output only. Timestamp when this Execution was last updated. */ updateTime?: string | null; } /** - * Selector for entityId. Getting ids from the given source. - */ - export interface Schema$GoogleCloudAiplatformV1EntityIdSelector { - /** - * Source of Csv - */ - csvSource?: Schema$GoogleCloudAiplatformV1CsvSource; - /** - * Source column that holds entity IDs. If not provided, entity IDs are extracted from the column named entity_id. - */ - entityIdField?: string | null; - } - /** - * An entity type is a type of object in a system that needs to be modeled and have stored information about. For example, driver is an entity type, and driver0 is an instance of an entity type driver. + * Request message for PredictionService.Explain. */ - export interface Schema$GoogleCloudAiplatformV1EntityType { - /** - * Output only. Timestamp when this EntityType was created. - */ - createTime?: string | null; + export interface Schema$GoogleCloudAiplatformV1ExplainRequest { /** - * Optional. Description of the EntityType. + * If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding Endpoint.traffic_split. */ - description?: string | null; + deployedModelId?: string | null; /** - * Optional. Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * If specified, overrides the explanation_spec of the DeployedModel. 
Can be used for explaining prediction results with different configurations, such as: - Explaining top-5 predictions results as opposed to top-1; - Increasing path count or step count of the attribution methods to reduce approximate errors; - Using different baselines for explaining the prediction results. */ - etag?: string | null; + explanationSpecOverride?: Schema$GoogleCloudAiplatformV1ExplanationSpecOverride; /** - * Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one EntityType (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Required. The instances that are the input to the explanation call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the explanation call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. */ - labels?: {[key: string]: string} | null; + instances?: any[] | null; /** - * Optional. The default monitoring configuration for all Features with value type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 under this EntityType. If this is populated with [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is disabled. + * The parameters that govern the prediction. 
The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri. */ - monitoringConfig?: Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfig; + parameters?: any | null; + } + /** + * Response message for PredictionService.Explain. + */ + export interface Schema$GoogleCloudAiplatformV1ExplainResponse { /** - * Immutable. Name of the EntityType. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. + * ID of the Endpoint's DeployedModel that served this explanation. */ - name?: string | null; + deployedModelId?: string | null; /** - * Optional. Config for data retention policy in offline storage. TTL in days for feature values that will be stored in offline storage. The Feature Store offline storage periodically removes obsolete feature values older than `offline_storage_ttl_days` since the feature generation time. If unset (or explicitly set to 0), default to 4000 days TTL. + * The explanations of the Model's PredictResponse.predictions. It has the same number of elements as instances to be explained. */ - offlineStorageTtlDays?: number | null; + explanations?: Schema$GoogleCloudAiplatformV1Explanation[]; /** - * Output only. Timestamp when this EntityType was most recently updated. + * The predictions that are the output of the predictions call. Same as PredictResponse.predictions. */ - updateTime?: string | null; + predictions?: any[] | null; } /** - * Represents an environment variable present in a Container or Python Module. + * Explanation of a prediction (provided in PredictResponse.predictions) produced by the Model on a given instance. 
*/ - export interface Schema$GoogleCloudAiplatformV1EnvVar { + export interface Schema$GoogleCloudAiplatformV1Explanation { /** - * Required. Name of the environment variable. Must be a valid C identifier. + * Output only. Feature attributions grouped by predicted outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. By default, we provide Shapley values for the predicted class. However, you can configure the explanation request to generate Shapley values for any other classes too. For example, if a model predicts a probability of `0.4` for approving a loan application, the model's decision is to reject the application since `p(reject) = 0.6 \> p(approve) = 0.4`, and the default Shapley values would be computed for rejection decision and not approval, even though the latter might be the positive class. If users set ExplanationParameters.top_k, the attributions are sorted by instance_output_value in descending order. If ExplanationParameters.output_indices is specified, the attributions are stored by Attribution.output_index in the same order as they appear in the output_indices. */ - name?: string | null; + attributions?: Schema$GoogleCloudAiplatformV1Attribution[]; /** - * Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + * Output only. 
List of the nearest neighbors for example-based explanations. For models deployed with the examples explanations feature enabled, the attributions field is empty and instead the neighbors field is populated. */ - value?: string | null; + neighbors?: Schema$GoogleCloudAiplatformV1Neighbor[]; } /** - * Model error analysis for each annotation. + * Metadata describing the Model's input and output for explanation. */ - export interface Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotation { + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadata { /** - * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. + * Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - attributedItems?: Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem[]; + featureAttributionsSchemaUri?: string | null; /** - * The outlier score of this annotated item. Usually defined as the min of all distances from attributed items. + * Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). 
For custom images, the key must match with the key in instance. */ - outlierScore?: number | null; + inputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadata; + } | null; /** - * The threshold used to determine if this annotation is an outlier or not. + * Name of the source to generate embeddings for example based explanations. */ - outlierThreshold?: number | null; + latentSpaceSource?: string | null; /** - * The query type used for finding the attributed items. + * Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. */ - queryType?: string | null; + outputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata; + } | null; } /** - * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. + * Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. */ - export interface Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem { + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadata { /** - * The unique ID for each annotation. Used by FE to allocate the annotation in DB. + * Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ - annotationResourceName?: string | null; + denseShapeTensorName?: string | null; /** - * The distance of this item to the annotation. + * A list of baselines for the encoded tensor. 
The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor. */ - distance?: number | null; - } - /** - * True positive, false positive, or false negative. EvaluatedAnnotation is only available under ModelEvaluationSlice with slice of `annotationSpec` dimension. - */ - export interface Schema$GoogleCloudAiplatformV1EvaluatedAnnotation { + encodedBaselines?: any[] | null; /** - * Output only. The data item payload that the Model predicted this EvaluatedAnnotation on. + * Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table. */ - dataItemPayload?: any | null; + encodedTensorName?: string | null; /** - * Annotations of model error analysis results. + * Defines how the feature is encoded into the input tensor. Defaults to IDENTITY. */ - errorAnalysisAnnotations?: Schema$GoogleCloudAiplatformV1ErrorAnalysisAnnotation[]; + encoding?: string | null; /** - * Output only. ID of the EvaluatedDataItemView under the same ancestor ModelEvaluation. The EvaluatedDataItemView consists of all ground truths and predictions on data_item_payload. + * The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized. */ - evaluatedDataItemViewId?: string | null; + featureValueDomain?: Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain; /** - * Explanations of predictions. Each element of the explanations indicates the explanation for one explanation Method. The attributions list in the EvaluatedAnnotationExplanation.explanation object corresponds to the predictions list. For example, the second element in the attributions list explains the second element in the predictions list. 
+ * Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name. */ - explanations?: Schema$GoogleCloudAiplatformV1EvaluatedAnnotationExplanation[]; + groupName?: string | null; /** - * Output only. The ground truth Annotations, i.e. the Annotations that exist in the test data the Model is evaluated on. For true positive, there is one and only one ground truth annotation, which matches the only prediction in predictions. For false positive, there are zero or more ground truth annotations that are similar to the only prediction in predictions, but not enough for a match. For false negative, there is one and only one ground truth annotation, which doesn't match any predictions created by the model. The schema of the ground truth is stored in ModelEvaluation.annotation_schema_uri + * A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. */ - groundTruths?: any[] | null; + indexFeatureMapping?: string[] | null; /** - * Output only. The model predicted annotations. For true positive, there is one and only one prediction, which matches the only one ground truth annotation in ground_truths. For false positive, there is one and only one prediction, which doesn't match any ground truth annotation of the corresponding data_item_view_id. For false negative, there are zero or more predictions which are similar to the only ground truth annotation in ground_truths but not enough for a match. The schema of the prediction is stored in ModelEvaluation.annotation_schema_uri + * Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. 
Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ - predictions?: any[] | null; + indicesTensorName?: string | null; /** - * Output only. Type of the EvaluatedAnnotation. + * Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature's input in the instance[]. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. */ - type?: string | null; - } - /** - * Explanation result of the prediction produced by the Model. - */ - export interface Schema$GoogleCloudAiplatformV1EvaluatedAnnotationExplanation { + inputBaselines?: any[] | null; /** - * Explanation attribution response details. + * Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow. */ - explanation?: Schema$GoogleCloudAiplatformV1Explanation; + inputTensorName?: string | null; /** - * Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` + * Modality of the feature. Valid values are: numeric, image. Defaults to numeric. */ - explanationType?: string | null; + modality?: string | null; + /** + * Visualization configurations for image explanation. + */ + visualization?: Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization; } /** - * An edge describing the relationship between an Artifact and an Execution in a lineage graph. 
+ * Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. */ - export interface Schema$GoogleCloudAiplatformV1Event { - /** - * Required. The relative resource name of the Artifact in the Event. - */ - artifact?: string | null; + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain { /** - * Output only. Time the Event occurred. + * The maximum permissible value for this feature. */ - eventTime?: string | null; + maxValue?: number | null; /** - * Output only. The relative resource name of the Execution in the Event. + * The minimum permissible value for this feature. */ - execution?: string | null; + minValue?: number | null; /** - * The labels with user-defined metadata to annotate Events. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Event (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization. */ - labels?: {[key: string]: string} | null; + originalMean?: number | null; /** - * Required. The type of the Event. 
+ * If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization. */ - type?: string | null; + originalStddev?: number | null; } /** - * Example-based explainability that returns the nearest neighbors from the provided dataset. + * Visualization configurations for image explanation. */ - export interface Schema$GoogleCloudAiplatformV1Examples { + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization { /** - * The Cloud Storage input instances. + * Excludes attributions below the specified percentile, from the highlighted areas. Defaults to 62. */ - exampleGcsSource?: Schema$GoogleCloudAiplatformV1ExamplesExampleGcsSource; + clipPercentLowerbound?: number | null; /** - * The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + * Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9. */ - nearestNeighborSearchConfig?: any | null; + clipPercentUpperbound?: number | null; /** - * The number of neighbors to return when querying for examples. + * The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue. 
*/ - neighborCount?: number | null; + colorMap?: string | null; /** - * Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality. + * How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE. */ - presets?: Schema$GoogleCloudAiplatformV1Presets; - } - /** - * The Cloud Storage input instances. - */ - export interface Schema$GoogleCloudAiplatformV1ExamplesExampleGcsSource { + overlayType?: string | null; /** - * The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. + * Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE. */ - dataFormat?: string | null; + polarity?: string | null; /** - * The Cloud Storage location for the input instances. + * Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES. */ - gcsSource?: Schema$GoogleCloudAiplatformV1GcsSource; + type?: string | null; } /** - * Overrides for example-based explanations. + * Metadata of the prediction output to be explained. */ - export interface Schema$GoogleCloudAiplatformV1ExamplesOverride { - /** - * The number of neighbors to return that have the same crowding tag. - */ - crowdingCount?: number | null; - /** - * The format of the data being provided with each call. - */ - dataFormat?: string | null; + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata { /** - * The number of neighbors to return. + * Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. 
The display names in the prediction must have the same shape of the outputs, so that it can be located by Attribution.output_index for a specific output. */ - neighborCount?: number | null; + displayNameMappingKey?: string | null; /** - * Restrict the resulting nearest neighbors to respect these constraints. + * Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It's not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sort the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index. */ - restrictions?: Schema$GoogleCloudAiplatformV1ExamplesRestrictionsNamespace[]; + indexDisplayNameMapping?: any | null; /** - * If true, return the embeddings instead of neighbors. + * Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow. */ - returnEmbeddings?: boolean | null; + outputTensorName?: string | null; } /** - * Restrictions namespace for example-based explanations overrides. + * The ExplanationMetadata entries that can be overridden at online explanation time. */ - export interface Schema$GoogleCloudAiplatformV1ExamplesRestrictionsNamespace { - /** - * The list of allowed tags. - */ - allow?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataOverride { /** - * The list of deny tags. + * Required. Overrides the input metadata of the features. The key is the name of the feature to be overridden. The keys specified here must exist in the input metadata to be overridden. If a feature is not specified here, the corresponding feature's input metadata is not overridden. 
*/ - deny?: string[] | null; + inputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride; + } | null; + } + /** + * The input metadata entries to be overridden. + */ + export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride { /** - * The namespace name. + * Baseline inputs for this feature. This overrides the `input_baseline` field of the ExplanationMetadata.InputMetadata object of the corresponding feature's input metadata. If it's not specified, the original baselines are not overridden. */ - namespaceName?: string | null; + inputBaselines?: any[] | null; } /** - * Instance of a general execution. + * Parameters to configure explaining for Model's predictions. */ - export interface Schema$GoogleCloudAiplatformV1Execution { + export interface Schema$GoogleCloudAiplatformV1ExplanationParameters { /** - * Output only. Timestamp when this Execution was created. + * Example-based explanations that returns the nearest neighbors from the provided dataset. */ - createTime?: string | null; + examples?: Schema$GoogleCloudAiplatformV1Examples; /** - * Description of the Execution + * An attribution method that computes Aumann-Shapley values taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 */ - description?: string | null; + integratedGradientsAttribution?: Schema$GoogleCloudAiplatformV1IntegratedGradientsAttribution; /** - * User provided display name of the Execution. May be up to 128 Unicode characters. + * If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. 
Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). */ - displayName?: string | null; + outputIndices?: any[] | null; /** - * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for model details: https://arxiv.org/abs/1306.4265. */ - etag?: string | null; + sampledShapleyAttribution?: Schema$GoogleCloudAiplatformV1SampledShapleyAttribution; /** - * The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). + * If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predicts more than one outputs (e,g, multi-class Models). When set to -1, returns explanations for all outputs. */ - labels?: {[key: string]: string} | null; + topK?: number | null; /** - * Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + * An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. 
If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead. */ - metadata?: {[key: string]: any} | null; + xraiAttribution?: Schema$GoogleCloudAiplatformV1XraiAttribution; + } + /** + * Specification of Model explanation. + */ + export interface Schema$GoogleCloudAiplatformV1ExplanationSpec { /** - * Output only. The resource name of the Execution. + * Optional. Metadata describing the Model's input and output for explanation. */ - name?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1ExplanationMetadata; /** - * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Required. Parameters that configure explaining of the Model's predictions. */ - schemaTitle?: string | null; + parameters?: Schema$GoogleCloudAiplatformV1ExplanationParameters; + } + /** + * The ExplanationSpec entries that can be overridden at online explanation time. + */ + export interface Schema$GoogleCloudAiplatformV1ExplanationSpecOverride { /** - * The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * The example-based explanations parameter overrides. */ - schemaVersion?: string | null; + examplesOverride?: Schema$GoogleCloudAiplatformV1ExamplesOverride; /** - * The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. + * The metadata to be overridden. 
If not specified, no metadata is overridden. */ - state?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1ExplanationMetadataOverride; /** - * Output only. Timestamp when this Execution was last updated. + * The parameters to be overridden. Note that the attribution method cannot be changed. If not specified, no parameter is overridden. */ - updateTime?: string | null; + parameters?: Schema$GoogleCloudAiplatformV1ExplanationParameters; } /** - * Request message for PredictionService.Explain. + * Describes what part of the Dataset is to be exported, the destination of the export and how to export. */ - export interface Schema$GoogleCloudAiplatformV1ExplainRequest { + export interface Schema$GoogleCloudAiplatformV1ExportDataConfig { /** - * If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding Endpoint.traffic_split. + * The Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. */ - deployedModelId?: string | null; + annotationSchemaUri?: string | null; /** - * If specified, overrides the explanation_spec of the DeployedModel. 
Can be used for explaining prediction results with different configurations, such as: - Explaining top-5 predictions results as opposed to top-1; - Increasing path count or step count of the attribution methods to reduce approximate errors; - Using different baselines for explaining the prediction results. + * An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations. */ - explanationSpecOverride?: Schema$GoogleCloudAiplatformV1ExplanationSpecOverride; + annotationsFilter?: string | null; /** - * Required. The instances that are the input to the explanation call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the explanation call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. + * Indicates the usage of the exported files. */ - instances?: any[] | null; + exportUse?: string | null; /** - * The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri. + * Split based on the provided filters for each set. */ - parameters?: any | null; - } - /** - * Response message for PredictionService.Explain. - */ - export interface Schema$GoogleCloudAiplatformV1ExplainResponse { + filterSplit?: Schema$GoogleCloudAiplatformV1ExportFilterSplit; /** - * ID of the Endpoint's DeployedModel that served this explanation. + * Split based on fractions defining the size of each set. */ - deployedModelId?: string | null; + fractionSplit?: Schema$GoogleCloudAiplatformV1ExportFractionSplit; /** - * The explanations of the Model's PredictResponse.predictions. 
It has the same number of elements as instances to be explained. + * The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format. */ - explanations?: Schema$GoogleCloudAiplatformV1Explanation[]; + gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; /** - * The predictions that are the output of the predictions call. Same as PredictResponse.predictions. + * The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. */ - predictions?: any[] | null; + savedQueryId?: string | null; } /** - * Explanation of a prediction (provided in PredictResponse.predictions) produced by the Model on a given instance. + * Runtime operation information for DatasetService.ExportData. */ - export interface Schema$GoogleCloudAiplatformV1Explanation { + export interface Schema$GoogleCloudAiplatformV1ExportDataOperationMetadata { /** - * Output only. Feature attributions grouped by predicted outputs. 
For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. By default, we provide Shapley values for the predicted class. However, you can configure the explanation request to generate Shapley values for any other classes too. For example, if a model predicts a probability of `0.4` for approving a loan application, the model's decision is to reject the application since `p(reject) = 0.6 \> p(approve) = 0.4`, and the default Shapley values would be computed for rejection decision and not approval, even though the latter might be the positive class. If users set ExplanationParameters.top_k, the attributions are sorted by instance_output_value in descending order. If ExplanationParameters.output_indices is specified, the attributions are stored by Attribution.output_index in the same order as they appear in the output_indices. + * A Google Cloud Storage directory which path ends with '/'. The exported data is stored in the directory. */ - attributions?: Schema$GoogleCloudAiplatformV1Attribution[]; + gcsOutputDirectory?: string | null; /** - * Output only. List of the nearest neighbors for example-based explanations. For models deployed with the examples explanations feature enabled, the attributions field is empty and instead the neighbors field is populated. + * The common part of the operation metadata. */ - neighbors?: Schema$GoogleCloudAiplatformV1Neighbor[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Metadata describing the Model's input and output for explanation. + * Request message for DatasetService.ExportData. 
*/ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadata { - /** - * Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. - */ - featureAttributionsSchemaUri?: string | null; + export interface Schema$GoogleCloudAiplatformV1ExportDataRequest { /** - * Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. + * Required. The desired output location. */ - inputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadata; - } | null; + exportConfig?: Schema$GoogleCloudAiplatformV1ExportDataConfig; + } + /** + * Response message for DatasetService.ExportData. + */ + export interface Schema$GoogleCloudAiplatformV1ExportDataResponse { /** - * Name of the source to generate embeddings for example based explanations. + * Only present for custom code training export use case. Records data stats, i.e., train/validation/test item/annotation counts calculated during the export operation. 
*/ - latentSpaceSource?: string | null; + dataStats?: Schema$GoogleCloudAiplatformV1ModelDataStats; /** - * Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + * All of the files that are exported in this export operation. For custom code training export, only three (training, validation and test) Cloud Storage paths in wildcard format are populated (for example, gs://.../training-*). */ - outputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata; - } | null; + exportedFiles?: string[] | null; } /** - * Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. + * Details of operations that exports Features values. */ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadata { + export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesOperationMetadata { /** - * Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + * Operation metadata for Featurestore export Feature values. */ - denseShapeTensorName?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Request message for FeaturestoreService.ExportFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequest { /** - * A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor. + * Required. 
Specifies destination location and format. */ - encodedBaselines?: any[] | null; + destination?: Schema$GoogleCloudAiplatformV1FeatureValueDestination; /** - * Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table. + * Required. Selects Features to export values of. */ - encodedTensorName?: string | null; + featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; /** - * Defines how the feature is encoded into the input tensor. Defaults to IDENTITY. + * Exports all historical values of all entities of the EntityType within a time range */ - encoding?: string | null; + fullExport?: Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport; /** - * The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized. + * Per-Feature export settings. */ - featureValueDomain?: Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain; + settings?: Schema$GoogleCloudAiplatformV1DestinationFeatureSetting[]; /** - * Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name. + * Exports the latest Feature values of all entities of the EntityType within a time range. */ - groupName?: string | null; + snapshotExport?: Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport; + } + /** + * Describes exporting all historical Feature values of all entities of the EntityType between [start_time, end_time]. 
+ */ + export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { /** - * A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. + * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. */ - indexFeatureMapping?: string[] | null; + endTime?: string | null; /** - * Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. */ - indicesTensorName?: string | null; - /** - * Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature's input in the instance[]. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. + startTime?: string | null; + } + /** + * Describes exporting the latest Feature values of all entities of the EntityType between [start_time, snapshot_time]. 
+ */ + export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { + /** + * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. */ - inputBaselines?: any[] | null; + snapshotTime?: string | null; /** - * Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow. + * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. */ - inputTensorName?: string | null; + startTime?: string | null; + } + /** + * Response message for FeaturestoreService.ExportFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesResponse {} + /** + * Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets. + */ + export interface Schema$GoogleCloudAiplatformV1ExportFilterSplit { /** - * Modality of the feature. Valid values are: numeric, image. Defaults to numeric. + * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. */ - modality?: string | null; + testFilter?: string | null; /** - * Visualization configurations for image explanation. + * Required. A filter on DataItems of the Dataset. 
DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. */ - visualization?: Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization; + trainingFilter?: string | null; + /** + * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + */ + validationFilter?: string | null; } /** - * Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. + * Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. 
*/ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain { + export interface Schema$GoogleCloudAiplatformV1ExportFractionSplit { /** - * The maximum permissible value for this feature. + * The fraction of the input data that is to be used to evaluate the Model. */ - maxValue?: number | null; + testFraction?: number | null; /** - * The minimum permissible value for this feature. + * The fraction of the input data that is to be used to train the Model. */ - minValue?: number | null; + trainingFraction?: number | null; /** - * If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization. + * The fraction of the input data that is to be used to validate the Model. */ - originalMean?: number | null; + validationFraction?: number | null; + } + /** + * Details of ModelService.ExportModel operation. + */ + export interface Schema$GoogleCloudAiplatformV1ExportModelOperationMetadata { /** - * If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization. + * The common part of the operation metadata. */ - originalStddev?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + /** + * Output only. Information further describing the output of this Model export. + */ + outputInfo?: Schema$GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo; } /** - * Visualization configurations for image explanation. + * Further describes the output of the ExportModel. Supplements ExportModelRequest.OutputConfig. */ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization { + export interface Schema$GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo { /** - * Excludes attributions below the specified percentile, from the highlighted areas. Defaults to 62. 
+ * Output only. If the Model artifact is being exported to Google Cloud Storage this is the full path of the directory created, into which the Model files are being written to. */ - clipPercentLowerbound?: number | null; + artifactOutputUri?: string | null; /** - * Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9. + * Output only. If the Model image is being exported to Google Container Registry or Artifact Registry this is the full path of the image created. */ - clipPercentUpperbound?: number | null; + imageOutputUri?: string | null; + } + /** + * Request message for ModelService.ExportModel. + */ + export interface Schema$GoogleCloudAiplatformV1ExportModelRequest { /** - * The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue. + * Required. The desired output location and configuration. */ - colorMap?: string | null; + outputConfig?: Schema$GoogleCloudAiplatformV1ExportModelRequestOutputConfig; + } + /** + * Output configuration for the Model export. + */ + export interface Schema$GoogleCloudAiplatformV1ExportModelRequestOutputConfig { /** - * How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE. + * The Cloud Storage location where the Model artifact is to be written to. Under the directory given as the destination a new one with name "`model-export--`", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. 
Inside, the Model and any of its supporting files will be written. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `ARTIFACT`. */ - overlayType?: string | null; + artifactDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; /** - * Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE. + * The ID of the format in which the Model must be exported. Each Model lists the export formats it supports. If no value is provided here, then the first from the list of the Model's supported formats is used by default. */ - polarity?: string | null; + exportFormatId?: string | null; /** - * Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES. + * The Google Container Registry or Artifact Registry uri where the Model container image will be copied to. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `IMAGE`. */ - type?: string | null; + imageDestination?: Schema$GoogleCloudAiplatformV1ContainerRegistryDestination; } /** - * Metadata of the prediction output to be explained. + * Response message of ModelService.ExportModel operation. */ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata { + export interface Schema$GoogleCloudAiplatformV1ExportModelResponse {} + /** + * Request message for TensorboardService.ExportTensorboardTimeSeriesData. + */ + export interface Schema$GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataRequest { /** - * Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. 
The display names in the prediction must have the same shape of the outputs, so that it can be located by Attribution.output_index for a specific output. + * Exports the TensorboardTimeSeries' data that match the filter expression. */ - displayNameMappingKey?: string | null; + filter?: string | null; /** - * Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It's not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sort the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index. + * Field to use to sort the TensorboardTimeSeries' data. By default, TensorboardTimeSeries' data is returned in a pseudo random order. */ - indexDisplayNameMapping?: any | null; + orderBy?: string | null; /** - * Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow. + * The maximum number of data points to return per page. The default page_size is 1000. Values must be between 1 and 10000. Values above 10000 are coerced to 10000. */ - outputTensorName?: string | null; - } - /** - * The ExplanationMetadata entries that can be overridden at online explanation time. - */ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataOverride { + pageSize?: number | null; /** - * Required. Overrides the input metadata of the features. The key is the name of the feature to be overridden. The keys specified here must exist in the input metadata to be overridden. If a feature is not specified here, the corresponding feature's input metadata is not overridden. 
+ * A page token, received from a previous ExportTensorboardTimeSeriesData call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ExportTensorboardTimeSeriesData must match the call that provided the page token. */ - inputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride; - } | null; + pageToken?: string | null; } /** - * The input metadata entries to be overridden. + * Response message for TensorboardService.ExportTensorboardTimeSeriesData. */ - export interface Schema$GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride { + export interface Schema$GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { /** - * Baseline inputs for this feature. This overrides the `input_baseline` field of the ExplanationMetadata.InputMetadata object of the corresponding feature's input metadata. If it's not specified, the original baselines are not overridden. + * A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - inputBaselines?: any[] | null; + nextPageToken?: string | null; + /** + * The returned time series data points. + */ + timeSeriesDataPoints?: Schema$GoogleCloudAiplatformV1TimeSeriesDataPoint[]; } /** - * Parameters to configure explaining for Model's predictions. + * Feature Metadata information. For example, color is a feature that describes an apple. */ - export interface Schema$GoogleCloudAiplatformV1ExplanationParameters { + export interface Schema$GoogleCloudAiplatformV1Feature { /** - * Example-based explanations that returns the nearest neighbors from the provided dataset. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was created. 
*/ - examples?: Schema$GoogleCloudAiplatformV1Examples; + createTime?: string | null; /** - * An attribution method that computes Aumann-Shapley values taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + * Description of the Feature. */ - integratedGradientsAttribution?: Schema$GoogleCloudAiplatformV1IntegratedGradientsAttribution; + description?: string | null; /** - * If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). + * Optional. Only applicable for Vertex AI Feature Store (Legacy). If not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If set to true, all types of data monitoring are disabled despite the config on EntityType. */ - outputIndices?: any[] | null; + disableMonitoring?: boolean | null; /** - * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for model details: https://arxiv.org/abs/1306.4265. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - sampledShapleyAttribution?: Schema$GoogleCloudAiplatformV1SampledShapleyAttribution; + etag?: string | null; /** - * If populated, returns attributions for top K indices of outputs (defaults to 1). 
Only applies to Models that predicts more than one outputs (e,g, multi-class Models). When set to -1, returns explanations for all outputs. + * Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Feature (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - topK?: number | null; + labels?: {[key: string]: string} | null; /** - * An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). The list of historical stats and anomalies with specified objectives. */ - xraiAttribution?: Schema$GoogleCloudAiplatformV1XraiAttribution; - } - /** - * Specification of Model explanation. - */ - export interface Schema$GoogleCloudAiplatformV1ExplanationSpec { + monitoringStatsAnomalies?: Schema$GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly[]; /** - * Optional. Metadata describing the Model's input and output for explanation. + * Immutable. Name of the Feature. 
Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}/features/{feature\}` `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}/features/{feature\}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. */ - metadata?: Schema$GoogleCloudAiplatformV1ExplanationMetadata; + name?: string | null; /** - * Required. Parameters that configure explaining of the Model's predictions. + * Entity responsible for maintaining this feature. Can be comma separated list of email addresses or URIs. */ - parameters?: Schema$GoogleCloudAiplatformV1ExplanationParameters; - } - /** - * The ExplanationSpec entries that can be overridden at online explanation time. - */ - export interface Schema$GoogleCloudAiplatformV1ExplanationSpecOverride { + pointOfContact?: string | null; /** - * The example-based explanations parameter overrides. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was most recently updated. */ - examplesOverride?: Schema$GoogleCloudAiplatformV1ExamplesOverride; + updateTime?: string | null; /** - * The metadata to be overridden. If not specified, no metadata is overridden. + * Immutable. Only applicable for Vertex AI Feature Store (Legacy). Type of Feature value. */ - metadata?: Schema$GoogleCloudAiplatformV1ExplanationMetadataOverride; + valueType?: string | null; /** - * The parameters to be overridden. Note that the attribution method cannot be changed. If not specified, no parameter is overridden. + * Only applicable for Vertex AI Feature Store. The name of the BigQuery Table/View column hosting data for this version. If no value is provided, will use feature_id. 
*/ - parameters?: Schema$GoogleCloudAiplatformV1ExplanationParameters; + versionColumnName?: string | null; } /** - * Describes what part of the Dataset is to be exported, the destination of the export and how to export. + * Vertex AI Feature Group. */ - export interface Schema$GoogleCloudAiplatformV1ExportDataConfig { + export interface Schema$GoogleCloudAiplatformV1FeatureGroup { /** - * The Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only used for custom training data export use cases. Only applicable to Datasets that have DataItems and Annotations. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. + * Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source. The BigQuery source table or view must have at least one entity ID column and a column named `feature_timestamp`. */ - annotationSchemaUri?: string | null; + bigQuery?: Schema$GoogleCloudAiplatformV1FeatureGroupBigQuery; /** - * An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations. + * Output only. Timestamp when this FeatureGroup was created. 
*/ - annotationsFilter?: string | null; + createTime?: string | null; /** - * Indicates the usage of the exported files. + * Optional. Description of the FeatureGroup. */ - exportUse?: string | null; + description?: string | null; /** - * Split based on the provided filters for each set. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - filterSplit?: Schema$GoogleCloudAiplatformV1ExportFilterSplit; + etag?: string | null; /** - * Split based on fractions defining the size of each set. + * Optional. The labels with user-defined metadata to organize your FeatureGroup. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureGroup(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - fractionSplit?: Schema$GoogleCloudAiplatformV1ExportFractionSplit; + labels?: {[key: string]: string} | null; /** - * The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format. + * Identifier. Name of the FeatureGroup. 
Format: `projects/{project\}/locations/{location\}/featureGroups/{featureGroup\}` */ - gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; + name?: string | null; /** - * The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only used for custom training data export use cases. Only applicable to Datasets that have SavedQueries. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. + * Output only. Timestamp when this FeatureGroup was last updated. */ - savedQueryId?: string | null; + updateTime?: string | null; } /** - * Runtime operation information for DatasetService.ExportData. + * Input source type for BigQuery Tables and Views. */ - export interface Schema$GoogleCloudAiplatformV1ExportDataOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1FeatureGroupBigQuery { /** - * A Google Cloud Storage directory which path ends with '/'. The exported data is stored in the directory. + * Required. Immutable. The BigQuery source URI that points to either a BigQuery Table or View. */ - gcsOutputDirectory?: string | null; + bigQuerySource?: Schema$GoogleCloudAiplatformV1BigQuerySource; /** - * The common part of the operation metadata. + * Optional. Columns to construct entity_id / row keys. If not provided defaults to `entity_id`. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + entityIdColumns?: string[] | null; } /** - * Request message for DatasetService.ExportData. + * A list of historical SnapshotAnalysis or ImportFeaturesAnalysis stats requested by user, sorted by FeatureStatsAnomaly.start_time descending. 
*/ - export interface Schema$GoogleCloudAiplatformV1ExportDataRequest { + export interface Schema$GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly { /** - * Required. The desired output location. + * Output only. The stats and anomalies generated at specific timestamp. */ - exportConfig?: Schema$GoogleCloudAiplatformV1ExportDataConfig; - } - /** - * Response message for DatasetService.ExportData. - */ - export interface Schema$GoogleCloudAiplatformV1ExportDataResponse { - /** - * Only present for custom code training export use case. Records data stats, i.e., train/validation/test item/annotation counts calculated during the export operation. - */ - dataStats?: Schema$GoogleCloudAiplatformV1ModelDataStats; + featureStatsAnomaly?: Schema$GoogleCloudAiplatformV1FeatureStatsAnomaly; /** - * All of the files that are exported in this export operation. For custom code training export, only three (training, validation and test) Cloud Storage paths in wildcard format are populated (for example, gs://.../training-*). + * Output only. The objective for each stats. */ - exportedFiles?: string[] | null; + objective?: string | null; } /** - * Details of operations that exports Features values. + * Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. */ - export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1FeatureNoiseSigma { /** - * Operation metadata for Featurestore export Feature values. + * Noise sigma per feature. No noise is added to features that are not set. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + noiseSigma?: Schema$GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature[]; } /** - * Request message for FeaturestoreService.ExportFeatureValues. + * Noise sigma for a single feature. 
*/ - export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature { /** - * Required. Specifies destination location and format. + * The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. */ - destination?: Schema$GoogleCloudAiplatformV1FeatureValueDestination; + name?: string | null; /** - * Required. Selects Features to export values of. + * This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. */ - featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; + sigma?: number | null; + } + /** + * Vertex AI Feature Online Store provides a centralized repository for serving ML features and embedding indexes at low latency. The Feature Online Store is a top-level container. + */ + export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStore { /** - * Exports all historical values of all entities of the EntityType within a time range + * Contains settings for the Cloud Bigtable instance that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. */ - fullExport?: Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport; + bigtable?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtable; /** - * Per-Feature export settings. + * Output only. Timestamp when this FeatureOnlineStore was created. */ - settings?: Schema$GoogleCloudAiplatformV1DestinationFeatureSetting[]; + createTime?: string | null; /** - * Exports the latest Feature values of all entities of the EntityType within a time range. + * Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. 
*/ - snapshotExport?: Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport; - } - /** - * Describes exporting all historical Feature values of all entities of the EntityType between [start_time, end_time]. - */ - export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { + dedicatedServingEndpoint?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint; /** - * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - endTime?: string | null; + etag?: string | null; /** - * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. + * Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - startTime?: string | null; - } - /** - * Describes exporting the latest Feature values of all entities of the EntityType between [start_time, snapshot_time]. - */ - export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { + labels?: {[key: string]: string} | null; /** - * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. 
+ * Identifier. Name of the FeatureOnlineStore. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{featureOnlineStore\}` */ - snapshotTime?: string | null; + name?: string | null; /** - * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. + * Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. */ - startTime?: string | null; - } - /** - * Response message for FeaturestoreService.ExportFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1ExportFeatureValuesResponse {} - /** - * Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets. - */ - export interface Schema$GoogleCloudAiplatformV1ExportFilterSplit { + optimized?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreOptimized; /** - * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + * Output only. State of the featureOnlineStore. */ - testFilter?: string | null; + state?: string | null; /** - * Required. A filter on DataItems of the Dataset. 
DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + * Output only. Timestamp when this FeatureOnlineStore was last updated. */ - trainingFilter?: string | null; + updateTime?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtable { /** - * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + * Required. Autoscaling config applied to Bigtable Instance. */ - validationFilter?: string | null; + autoScaling?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling; } - /** - * Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. - */ - export interface Schema$GoogleCloudAiplatformV1ExportFractionSplit { + export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling { /** - * The fraction of the input data that is to be used to evaluate the Model. + * Optional. A percentage of the cluster's CPU capacity. Can be from 10% to 80%. 
When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%. */ - testFraction?: number | null; + cpuUtilizationTarget?: number | null; /** - * The fraction of the input data that is to be used to train the Model. + * Required. The maximum number of nodes to scale up to. Must be greater than or equal to min_node_count, and less than or equal to 10 times of 'min_node_count'. */ - trainingFraction?: number | null; + maxNodeCount?: number | null; /** - * The fraction of the input data that is to be used to validate the Model. + * Required. The minimum number of nodes to scale down to. Must be greater than or equal to 1. */ - validationFraction?: number | null; + minNodeCount?: number | null; } /** - * Details of ModelService.ExportModel operation. + * The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. */ - export interface Schema$GoogleCloudAiplatformV1ExportModelOperationMetadata { - /** - * The common part of the operation metadata. - */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint { /** - * Output only. Information further describing the output of this Model export. + * Output only. This field will be populated with the domain name to use for this FeatureOnlineStore */ - outputInfo?: Schema$GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo; + publicEndpointDomainName?: string | null; } /** - * Further describes the output of the ExportModel. Supplements ExportModelRequest.OutputConfig. + * Optimized storage type */ - export interface Schema$GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo { - /** - * Output only. 
If the Model artifact is being exported to Google Cloud Storage this is the full path of the directory created, into which the Model files are being written to. - */ - artifactOutputUri?: string | null; - /** - * Output only. If the Model image is being exported to Google Container Registry or Artifact Registry this is the full path of the image created. - */ - imageOutputUri?: string | null; - } + export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreOptimized {} /** - * Request message for ModelService.ExportModel. + * Selector for Features of an EntityType. */ - export interface Schema$GoogleCloudAiplatformV1ExportModelRequest { + export interface Schema$GoogleCloudAiplatformV1FeatureSelector { /** - * Required. The desired output location and configuration. + * Required. Matches Features based on ID. */ - outputConfig?: Schema$GoogleCloudAiplatformV1ExportModelRequestOutputConfig; + idMatcher?: Schema$GoogleCloudAiplatformV1IdMatcher; } /** - * Output configuration for the Model export. + * Stats and Anomaly generated at specific timestamp for specific Feature. The start_time and end_time are used to define the time range of the dataset that current stats belongs to, e.g. prediction traffic is bucketed into prediction datasets by time window. If the Dataset is not defined by time window, start_time = end_time. Timestamp of the stats and anomalies always refers to end_time. Raw stats and anomalies are stored in stats_uri or anomaly_uri in the tensorflow defined protos. Field data_stats contains almost identical information with the raw stats in Vertex AI defined proto, for UI to display. */ - export interface Schema$GoogleCloudAiplatformV1ExportModelRequestOutputConfig { - /** - * The Cloud Storage location where the Model artifact is to be written to. Under the directory given as the destination a new one with name "`model-export--`", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. 
Inside, the Model and any of its supporting files will be written. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `ARTIFACT`. - */ - artifactDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; - /** - * The ID of the format in which the Model must be exported. Each Model lists the export formats it supports. If no value is provided here, then the first from the list of the Model's supported formats is used by default. - */ - exportFormatId?: string | null; + export interface Schema$GoogleCloudAiplatformV1FeatureStatsAnomaly { /** - * The Google Container Registry or Artifact Registry uri where the Model container image will be copied to. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `IMAGE`. + * This is the threshold used when detecting anomalies. The threshold can be changed by user, so this one might be different from ThresholdConfig.value. */ - imageDestination?: Schema$GoogleCloudAiplatformV1ContainerRegistryDestination; - } - /** - * Response message of ModelService.ExportModel operation. - */ - export interface Schema$GoogleCloudAiplatformV1ExportModelResponse {} - /** - * Request message for TensorboardService.ExportTensorboardTimeSeriesData. - */ - export interface Schema$GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataRequest { + anomalyDetectionThreshold?: number | null; /** - * Exports the TensorboardTimeSeries' data that match the filter expression. + * Path of the anomaly file for current feature values in Cloud Storage bucket. Format: gs:////anomalies. Example: gs://monitoring_bucket/feature_name/anomalies. Stats are stored as binary format with Protobuf message Anoamlies are stored as binary format with Protobuf message [tensorflow.metadata.v0.AnomalyInfo] (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). 
*/ - filter?: string | null; + anomalyUri?: string | null; /** - * Field to use to sort the TensorboardTimeSeries' data. By default, TensorboardTimeSeries' data is returned in a pseudo random order. + * Deviation from the current stats to baseline stats. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. */ - orderBy?: string | null; + distributionDeviation?: number | null; /** - * The maximum number of data points to return per page. The default page_size is 1000. Values must be between 1 and 10000. Values above 10000 are coerced to 10000. + * The end timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), end_time indicates the timestamp of the data used to generate stats (e.g. timestamp we take snapshots for feature values). */ - pageSize?: number | null; + endTime?: string | null; /** - * A page token, received from a previous ExportTensorboardTimeSeriesData call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ExportTensorboardTimeSeriesData must match the call that provided the page token. + * Feature importance score, only populated when cross-feature monitoring is enabled. For now only used to represent feature attribution score within range [0, 1] for ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW and ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT. */ - pageToken?: string | null; - } - /** - * Response message for TensorboardService.ExportTensorboardTimeSeriesData. - */ - export interface Schema$GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { + score?: number | null; /** - * A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. 
+ * The start timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), start_time is only used to indicate the monitoring intervals, so it always equals to (end_time - monitoring_interval). */ - nextPageToken?: string | null; + startTime?: string | null; /** - * The returned time series data points. + * Path of the stats file for current feature values in Cloud Storage bucket. Format: gs:////stats. Example: gs://monitoring_bucket/feature_name/stats. Stats are stored as binary format with Protobuf message [tensorflow.metadata.v0.FeatureNameStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto). */ - timeSeriesDataPoints?: Schema$GoogleCloudAiplatformV1TimeSeriesDataPoint[]; + statsUri?: string | null; } /** - * Feature Metadata information. For example, color is a feature that describes an apple. + * Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. */ - export interface Schema$GoogleCloudAiplatformV1Feature { + export interface Schema$GoogleCloudAiplatformV1Featurestore { /** - * Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was created. + * Output only. Timestamp when this Featurestore was created. */ createTime?: string | null; /** - * Description of the Feature. - */ - description?: string | null; - /** - * Optional. Only applicable for Vertex AI Feature Store (Legacy). If not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If set to true, all types of data monitoring are disabled despite the config on EntityType. + * Optional. Customer-managed encryption key spec for data storage. 
If set, both of the online and offline data storage will be secured by this key. */ - disableMonitoring?: boolean | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ etag?: string | null; /** - * Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Feature (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Featurestore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ labels?: {[key: string]: string} | null; /** - * Output only. Only applicable for Vertex AI Feature Store (Legacy). The list of historical stats and anomalies with specified objectives. - */ - monitoringStatsAnomalies?: Schema$GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly[]; - /** - * Immutable. Name of the Feature. 
Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}/features/{feature\}` `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}/features/{feature\}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. + * Output only. Name of the Featurestore. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}` */ name?: string | null; /** - * Entity responsible for maintaining this feature. Can be comma separated list of email addresses or URIs. + * Optional. Config for online storage resources. The field should not co-exist with the field of `OnlineStoreReplicationConfig`. If both of it and OnlineStoreReplicationConfig are unset, the feature store will not have an online store and cannot be used for online serving. */ - pointOfContact?: string | null; + onlineServingConfig?: Schema$GoogleCloudAiplatformV1FeaturestoreOnlineServingConfig; /** - * Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was most recently updated. + * Optional. TTL in days for feature values that will be stored in online serving storage. The Feature Store online storage periodically removes obsolete feature values older than `online_storage_ttl_days` since the feature generation time. Note that `online_storage_ttl_days` should be less than or equal to `offline_storage_ttl_days` for each EntityType under a featurestore. If not set, default to 4000 days */ - updateTime?: string | null; + onlineStorageTtlDays?: number | null; /** - * Immutable. Only applicable for Vertex AI Feature Store (Legacy). Type of Feature value. + * Output only. State of the featurestore. 
*/ - valueType?: string | null; + state?: string | null; /** - * Only applicable for Vertex AI Feature Store. The name of the BigQuery Table/View column hosting data for this version. If no value is provided, will use feature_id. + * Output only. Timestamp when this Featurestore was last updated. */ - versionColumnName?: string | null; + updateTime?: string | null; } /** - * Vertex AI Feature Group. + * Configuration of how features in Featurestore are monitored. */ - export interface Schema$GoogleCloudAiplatformV1FeatureGroup { + export interface Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfig { /** - * Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source. The BigQuery source table or view must have at least one entity ID column and a column named `feature_timestamp`. + * Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING). */ - bigQuery?: Schema$GoogleCloudAiplatformV1FeatureGroupBigQuery; + categoricalThresholdConfig?: Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig; /** - * Output only. Timestamp when this FeatureGroup was created. - */ - createTime?: string | null; - /** - * Optional. Description of the FeatureGroup. - */ - description?: string | null; - /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * Optional. The labels with user-defined metadata to organize your FeatureGroup. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. 
No more than 64 user labels can be associated with one FeatureGroup(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. - */ - labels?: {[key: string]: string} | null; - /** - * Identifier. Name of the FeatureGroup. Format: `projects/{project\}/locations/{location\}/featureGroups/{featureGroup\}` - */ - name?: string | null; - /** - * Output only. Timestamp when this FeatureGroup was last updated. - */ - updateTime?: string | null; - } - /** - * Input source type for BigQuery Tables and Views. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureGroupBigQuery { - /** - * Required. Immutable. The BigQuery source URI that points to either a BigQuery Table or View. - */ - bigQuerySource?: Schema$GoogleCloudAiplatformV1BigQuerySource; - /** - * Optional. Columns to construct entity_id / row keys. If not provided defaults to `entity_id`. - */ - entityIdColumns?: string[] | null; - } - /** - * A list of historical SnapshotAnalysis or ImportFeaturesAnalysis stats requested by user, sorted by FeatureStatsAnomaly.start_time descending. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly { - /** - * Output only. The stats and anomalies generated at specific timestamp. - */ - featureStatsAnomaly?: Schema$GoogleCloudAiplatformV1FeatureStatsAnomaly; - /** - * Output only. The objective for each stats. - */ - objective?: string | null; - } - /** - * Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureNoiseSigma { - /** - * Noise sigma per feature. No noise is added to features that are not set. - */ - noiseSigma?: Schema$GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature[]; - } - /** - * Noise sigma for a single feature. 
- */ - export interface Schema$GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature { - /** - * The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. - */ - name?: string | null; - /** - * This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. - */ - sigma?: number | null; - } - /** - * Vertex AI Feature Online Store provides a centralized repository for serving ML features and embedding indexes at low latency. The Feature Online Store is a top-level container. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStore { - /** - * Contains settings for the Cloud Bigtable instance that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. - */ - bigtable?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtable; - /** - * Output only. Timestamp when this FeatureOnlineStore was created. - */ - createTime?: string | null; - /** - * Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. - */ - dedicatedServingEndpoint?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint; - /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. 
No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. - */ - labels?: {[key: string]: string} | null; - /** - * Identifier. Name of the FeatureOnlineStore. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{featureOnlineStore\}` - */ - name?: string | null; - /** - * Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. - */ - optimized?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreOptimized; - /** - * Output only. State of the featureOnlineStore. - */ - state?: string | null; - /** - * Output only. Timestamp when this FeatureOnlineStore was last updated. - */ - updateTime?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtable { - /** - * Required. Autoscaling config applied to Bigtable Instance. - */ - autoScaling?: Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling; - } - export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling { - /** - * Optional. A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%. - */ - cpuUtilizationTarget?: number | null; - /** - * Required. The maximum number of nodes to scale up to. Must be greater than or equal to min_node_count, and less than or equal to 10 times of 'min_node_count'. - */ - maxNodeCount?: number | null; - /** - * Required. 
The minimum number of nodes to scale down to. Must be greater than or equal to 1. - */ - minNodeCount?: number | null; - } - /** - * The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint { - /** - * Output only. This field will be populated with the domain name to use for this FeatureOnlineStore - */ - publicEndpointDomainName?: string | null; - } - /** - * Optimized storage type - */ - export interface Schema$GoogleCloudAiplatformV1FeatureOnlineStoreOptimized {} - /** - * Selector for Features of an EntityType. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureSelector { - /** - * Required. Matches Features based on ID. - */ - idMatcher?: Schema$GoogleCloudAiplatformV1IdMatcher; - } - /** - * Stats and Anomaly generated at specific timestamp for specific Feature. The start_time and end_time are used to define the time range of the dataset that current stats belongs to, e.g. prediction traffic is bucketed into prediction datasets by time window. If the Dataset is not defined by time window, start_time = end_time. Timestamp of the stats and anomalies always refers to end_time. Raw stats and anomalies are stored in stats_uri or anomaly_uri in the tensorflow defined protos. Field data_stats contains almost identical information with the raw stats in Vertex AI defined proto, for UI to display. - */ - export interface Schema$GoogleCloudAiplatformV1FeatureStatsAnomaly { - /** - * This is the threshold used when detecting anomalies. The threshold can be changed by user, so this one might be different from ThresholdConfig.value. - */ - anomalyDetectionThreshold?: number | null; - /** - * Path of the anomaly file for current feature values in Cloud Storage bucket. Format: gs:////anomalies. Example: gs://monitoring_bucket/feature_name/anomalies. 
Stats are stored as binary format with Protobuf message Anoamlies are stored as binary format with Protobuf message [tensorflow.metadata.v0.AnomalyInfo] (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). - */ - anomalyUri?: string | null; - /** - * Deviation from the current stats to baseline stats. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. - */ - distributionDeviation?: number | null; - /** - * The end timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), end_time indicates the timestamp of the data used to generate stats (e.g. timestamp we take snapshots for feature values). - */ - endTime?: string | null; - /** - * Feature importance score, only populated when cross-feature monitoring is enabled. For now only used to represent feature attribution score within range [0, 1] for ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW and ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT. - */ - score?: number | null; - /** - * The start timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), start_time is only used to indicate the monitoring intervals, so it always equals to (end_time - monitoring_interval). - */ - startTime?: string | null; - /** - * Path of the stats file for current feature values in Cloud Storage bucket. Format: gs:////stats. Example: gs://monitoring_bucket/feature_name/stats. Stats are stored as binary format with Protobuf message [tensorflow.metadata.v0.FeatureNameStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto). 
- */ - statsUri?: string | null; - } - /** - * Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. - */ - export interface Schema$GoogleCloudAiplatformV1Featurestore { - /** - * Output only. Timestamp when this Featurestore was created. - */ - createTime?: string | null; - /** - * Optional. Customer-managed encryption key spec for data storage. If set, both of the online and offline data storage will be secured by this key. - */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; - /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Featurestore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. - */ - labels?: {[key: string]: string} | null; - /** - * Output only. Name of the Featurestore. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}` - */ - name?: string | null; - /** - * Optional. Config for online storage resources. The field should not co-exist with the field of `OnlineStoreReplicationConfig`. If both of it and OnlineStoreReplicationConfig are unset, the feature store will not have an online store and cannot be used for online serving. - */ - onlineServingConfig?: Schema$GoogleCloudAiplatformV1FeaturestoreOnlineServingConfig; - /** - * Optional. 
TTL in days for feature values that will be stored in online serving storage. The Feature Store online storage periodically removes obsolete feature values older than `online_storage_ttl_days` since the feature generation time. Note that `online_storage_ttl_days` should be less than or equal to `offline_storage_ttl_days` for each EntityType under a featurestore. If not set, default to 4000 days - */ - onlineStorageTtlDays?: number | null; - /** - * Output only. State of the featurestore. - */ - state?: string | null; - /** - * Output only. Timestamp when this Featurestore was last updated. - */ - updateTime?: string | null; - } - /** - * Configuration of how features in Featurestore are monitored. - */ - export interface Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfig { - /** - * Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING). - */ - categoricalThresholdConfig?: Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig; - /** - * The config for ImportFeatures Analysis Based Feature Monitoring. + * The config for ImportFeatures Analysis Based Feature Monitoring. */ importFeaturesAnalysis?: Schema$GoogleCloudAiplatformV1FeaturestoreMonitoringConfigImportFeaturesAnalysis; /** @@ -4669,6 +4158,19 @@ export namespace aiplatform_v1 { * Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. */ perCrowdingAttributeNeighborCount?: number | null; + /** + * Optional. Represents RRF algorithm that combines search results. 
+ */ + rrf?: Schema$GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF; + } + /** + * Parameters for RRF algorithm that combines search results. + */ + export interface Schema$GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF { + /** + * Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. + */ + alpha?: number | null; } /** * The response message for MatchService.FindNeighbors. @@ -4704,6 +4206,10 @@ export namespace aiplatform_v1 { * The distance between the neighbor and the dense embedding query. */ distance?: number | null; + /** + * The distance between the neighbor and the query sparse_embedding. + */ + sparseDistance?: number | null; } /** * Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. @@ -5254,6 +4760,10 @@ export namespace aiplatform_v1 { * Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */ restricts?: Schema$GoogleCloudAiplatformV1IndexDatapointRestriction[]; + /** + * Optional. Feature embedding vector for sparse index. + */ + sparseEmbedding?: Schema$GoogleCloudAiplatformV1IndexDatapointSparseEmbedding; } /** * Crowding tag is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. 
@@ -5307,18 +4817,31 @@ export namespace aiplatform_v1 { namespace?: string | null; } /** - * Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes. + * Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. */ - export interface Schema$GoogleCloudAiplatformV1IndexEndpoint { + export interface Schema$GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { /** - * Output only. Timestamp when this IndexEndpoint was created. + * Required. The list of indexes for the embedding values of the sparse vector. */ - createTime?: string | null; + dimensions?: string[] | null; /** - * Output only. The indexes deployed in this endpoint. + * Required. The list of embedding values of the sparse vector. */ - deployedIndexes?: Schema$GoogleCloudAiplatformV1DeployedIndex[]; - /** + values?: number[] | null; + } + /** + * Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes. + */ + export interface Schema$GoogleCloudAiplatformV1IndexEndpoint { + /** + * Output only. Timestamp when this IndexEndpoint was created. + */ + createTime?: string | null; + /** + * Output only. The indexes deployed in this endpoint. + */ + deployedIndexes?: Schema$GoogleCloudAiplatformV1DeployedIndex[]; + /** * The description of the IndexEndpoint. */ description?: string | null; @@ -5392,6 +4915,10 @@ export namespace aiplatform_v1 { * Output only. The number of shards in the Index. */ shardsCount?: number | null; + /** + * Output only. The number of sparse vectors in the Index. + */ + sparseVectorsCount?: string | null; /** * Output only. The number of dense vectors in the Index. */ @@ -12026,3444 +11553,2286 @@ export namespace aiplatform_v1 { * Output only. The actual training cost of the model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. 
*/ trainCostMilliNodeHours?: string | null; - } - /** - * Config that contains the strategy used to generate sliding windows in time series training. A window is a series of rows that comprise the context up to the time of prediction, and the horizon following. The corresponding row for each window marks the start of the forecast horizon. Each window is used as an input example for training/evaluation. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig { - /** - * Name of the column that should be used to generate sliding windows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window with the horizon starting at that row. The column will not be used as a feature in training. - */ - column?: string | null; - /** - * Maximum number of windows that should be generated across all time series. - */ - maxCount?: string | null; - /** - * Stride length used to generate input examples. Within one time series, every {$STRIDE_LENGTH\} rows will be used to generate a sliding window. - */ - strideLength?: string | null; - } - /** - * A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaVertex { - /** - * X coordinate. - */ - x?: number | null; - /** - * Y coordinate. - */ - y?: number | null; - } - /** - * Annotation details specific to video action recognition. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. - */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; - /** - * This Annotation applies to the time period represented by the TimeSegment. 
If it's not set, the Annotation applies to the whole video. - */ - timeSegment?: Schema$GoogleCloudAiplatformV1SchemaTimeSegment; - } - /** - * Annotation details specific to video classification. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaVideoClassificationAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. - */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; - /** - * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. - */ - timeSegment?: Schema$GoogleCloudAiplatformV1SchemaTimeSegment; - } - /** - * Payload of Video DataItem. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaVideoDataItem { - /** - * Required. Google Cloud Storage URI points to the original video in user's bucket. The video is up to 50 GB in size and up to 3 hour in duration. - */ - gcsUri?: string | null; - /** - * Output only. The mime type of the content of the video. Only the videos in below listed mime types are supported. Supported mime_type: - video/mp4 - video/avi - video/quicktime - */ - mimeType?: string | null; - } - /** - * The metadata of Datasets that contain Video DataItems. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaVideoDatasetMetadata { - /** - * Points to a YAML file stored on Google Cloud Storage describing payload of the Video DataItems that belong to this Dataset. - */ - dataItemSchemaUri?: string | null; - /** - * Google Cloud Storage Bucket name that contains the blob data of this Dataset. - */ - gcsBucket?: string | null; - } - /** - * Annotation details specific to video object tracking. - */ - export interface Schema$GoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. 
- */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; - /** - * The instance of the object, expressed as a positive integer. Used to track the same object across different frames. - */ - instanceId?: string | null; - /** - * A time (frame) of a video to which this annotation pertains. Represented as the duration since the video's start. - */ - timeOffset?: string | null; - /** - * The rightmost coordinate of the bounding box. - */ - xMax?: number | null; - /** - * The leftmost coordinate of the bounding box. - */ - xMin?: number | null; - /** - * The bottommost coordinate of the bounding box. - */ - yMax?: number | null; - /** - * The topmost coordinate of the bounding box. - */ - yMin?: number | null; - } - export interface Schema$GoogleCloudAiplatformV1SchemaVisualInspectionClassificationLabelSavedQueryMetadata { - /** - * Whether or not the classification label is multi_label. - */ - multiLabel?: boolean | null; - } - export interface Schema$GoogleCloudAiplatformV1SchemaVisualInspectionMaskSavedQueryMetadata {} - /** - * Response message for DatasetService.SearchDataItems. - */ - export interface Schema$GoogleCloudAiplatformV1SearchDataItemsResponse { - /** - * The DataItemViews read. - */ - dataItemViews?: Schema$GoogleCloudAiplatformV1DataItemView[]; - /** - * A token to retrieve next page of results. Pass to SearchDataItemsRequest.page_token to obtain that page. - */ - nextPageToken?: string | null; - } - /** - * Google search entry point. - */ - export interface Schema$GoogleCloudAiplatformV1SearchEntryPoint { - /** - * Optional. Web content snippet that can be embedded in a web page or an app webview. - */ - renderedContent?: string | null; - /** - * Optional. Base64 encoded JSON representing array of tuple. - */ - sdkBlob?: string | null; - } - /** - * Response message for FeaturestoreService.SearchFeatures. 
- */ - export interface Schema$GoogleCloudAiplatformV1SearchFeaturesResponse { - /** - * The Features matching the request. Fields returned: * `name` * `description` * `labels` * `create_time` * `update_time` - */ - features?: Schema$GoogleCloudAiplatformV1Feature[]; - /** - * A token, which can be sent as SearchFeaturesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. - */ - nextPageToken?: string | null; - } - /** - * Request message for MigrationService.SearchMigratableResources. - */ - export interface Schema$GoogleCloudAiplatformV1SearchMigratableResourcesRequest { - /** - * A filter for your search. You can use the following types of filters: * Resource type filters. The following strings filter for a specific type of MigratableResource: * `ml_engine_model_version:*` * `automl_model:*` * `automl_dataset:*` * `data_labeling_dataset:*` * "Migrated or not" filters. The following strings filter for resources that either have or have not already been migrated: * `last_migrate_time:*` filters for migrated resources. * `NOT last_migrate_time:*` filters for not yet migrated resources. - */ - filter?: string | null; - /** - * The standard page size. The default and maximum value is 100. - */ - pageSize?: number | null; - /** - * The standard page token. - */ - pageToken?: string | null; - } - /** - * Response message for MigrationService.SearchMigratableResources. - */ - export interface Schema$GoogleCloudAiplatformV1SearchMigratableResourcesResponse { - /** - * All migratable resources that can be migrated to the location specified in the request. - */ - migratableResources?: Schema$GoogleCloudAiplatformV1MigratableResource[]; - /** - * The standard next-page token. The migratable_resources may not fill page_size in SearchMigratableResourcesRequest even when there are subsequent pages. - */ - nextPageToken?: string | null; - } - /** - * Request message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. 
- */ - export interface Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest { - /** - * Required. The DeployedModel ID of the [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - */ - deployedModelId?: string | null; - /** - * The latest timestamp of stats being generated. If not set, indicates feching stats till the latest possible one. - */ - endTime?: string | null; - /** - * The feature display name. If specified, only return the stats belonging to this feature. Format: ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name, example: "user_destination". - */ - featureDisplayName?: string | null; - /** - * Required. Objectives of the stats to retrieve. - */ - objectives?: Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective[]; - /** - * The standard list page size. - */ - pageSize?: number | null; - /** - * A page token received from a previous JobService.SearchModelDeploymentMonitoringStatsAnomalies call. - */ - pageToken?: string | null; - /** - * The earliest timestamp of stats being generated. If not set, indicates fetching stats till the earliest possible one. - */ - startTime?: string | null; - } - /** - * Stats requested for specific objective. - */ - export interface Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective { - /** - * If set, all attribution scores between SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time and SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time are fetched, and page token doesn't take effect in this case. Only used to retrieve attribution score for the top Features which has the highest attribution score in the latest monitoring run. - */ - topFeatureCount?: number | null; - type?: string | null; - } - /** - * Response message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. 
- */ - export interface Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse { - /** - * Stats retrieved for requested objectives. There are at most 1000 ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats in the response. - */ - monitoringStats?: Schema$GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies[]; - /** - * The page token that can be used by the next JobService.SearchModelDeploymentMonitoringStatsAnomalies call. - */ - nextPageToken?: string | null; - } - /** - * The request message for FeatureOnlineStoreService.SearchNearestEntities. - */ - export interface Schema$GoogleCloudAiplatformV1SearchNearestEntitiesRequest { - /** - * Required. The query. - */ - query?: Schema$GoogleCloudAiplatformV1NearestNeighborQuery; - /** - * Optional. If set to true, the full entities (including all vector values and metadata) of the nearest neighbors are returned; otherwise only entity id of the nearest neighbors will be returned. Note that returning full entities will significantly increase the latency and cost of the query. - */ - returnFullEntity?: boolean | null; - } - /** - * Response message for FeatureOnlineStoreService.SearchNearestEntities - */ - export interface Schema$GoogleCloudAiplatformV1SearchNearestEntitiesResponse { - /** - * The nearest neighbors of the query entity. - */ - nearestNeighbors?: Schema$GoogleCloudAiplatformV1NearestNeighbors; - } - /** - * Configuration for the use of custom service account to run the workloads. - */ - export interface Schema$GoogleCloudAiplatformV1ServiceAccountSpec { - /** - * Required. If true, custom user-managed service account is enforced to run any workloads (for example, Vertex Jobs) on the resource. Otherwise, uses the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). - */ - enableCustomServiceAccount?: boolean | null; - /** - * Optional. 
Required when all below conditions are met * `enable_custom_service_account` is true; * any runtime is specified via `ResourceRuntimeSpec` on creation time, for example, Ray The users must have `iam.serviceAccounts.actAs` permission on this service account and then the specified runtime containers will run as it. Do not set this field if you want to submit jobs using custom service account to this PersistentResource after creation, but only specify the `service_account` inside the job. - */ - serviceAccount?: string | null; - } - /** - * A set of Shielded Instance options. See [Images using supported Shielded VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). - */ - export interface Schema$GoogleCloudAiplatformV1ShieldedVmConfig { - /** - * Defines whether the instance has [Secure Boot](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot) enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. - */ - enableSecureBoot?: boolean | null; - } - /** - * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf - */ - export interface Schema$GoogleCloudAiplatformV1SmoothGradConfig { - /** - * This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features. 
- */ - featureNoiseSigma?: Schema$GoogleCloudAiplatformV1FeatureNoiseSigma; - /** - * This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. - */ - noiseSigma?: number | null; - /** - * The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. - */ - noisySampleCount?: number | null; - } - /** - * SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist managers and workers. Managers are responsible for managing the workers in this pool as well as customers' data labeling jobs associated with this pool. Customers create specialist pool as well as start data labeling jobs on Cloud, managers and workers handle the jobs using CrowdCompute console. - */ - export interface Schema$GoogleCloudAiplatformV1SpecialistPool { - /** - * Required. The user-defined name of the SpecialistPool. The name can be up to 128 characters long and can consist of any UTF-8 characters. This field should be unique on project-level. - */ - displayName?: string | null; - /** - * Required. The resource name of the SpecialistPool. - */ - name?: string | null; - /** - * Output only. The resource name of the pending data labeling jobs. 
- */ - pendingDataLabelingJobs?: string[] | null; - /** - * The email addresses of the managers in the SpecialistPool. - */ - specialistManagerEmails?: string[] | null; - /** - * Output only. The number of managers in this SpecialistPool. - */ - specialistManagersCount?: number | null; - /** - * The email addresses of workers in the SpecialistPool. - */ - specialistWorkerEmails?: string[] | null; - } - /** - * Metadata information for NotebookService.StartNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1StartNotebookRuntimeOperationMetadata { - /** - * The operation generic information. - */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - /** - * A human-readable message that shows the intermediate progress details of NotebookRuntime. - */ - progressMessage?: string | null; - } - /** - * Request message for NotebookService.StartNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1StartNotebookRuntimeRequest {} - /** - * Request message for VizierService.StopTrial. - */ - export interface Schema$GoogleCloudAiplatformV1StopTrialRequest {} - /** - * Assigns input data to the training, validation, and test sets so that the distribution of values found in the categorical column (as specified by the `key` field) is mirrored within each split. The fraction values determine the relative sizes of the splits. For example, if the specified column has three values, with 50% of the rows having value "A", 25% value "B", and 25% value "C", and the split fractions are specified as 80/10/10, then the training set will constitute 80% of the training data, with about 50% of the training set rows having the value "A" for the specified column, about 25% having the value "B", and about 25% having the value "C". Only the top 500 occurring values are used; any values not in the top 500 values are randomly assigned to a split. If less than three rows contain a specific value, those rows are randomly assigned. 
Supported only for tabular Datasets. - */ - export interface Schema$GoogleCloudAiplatformV1StratifiedSplit { - /** - * Required. The key is a name of one of the Dataset's data columns. The key provided must be for a categorical column. - */ - key?: string | null; - /** - * The fraction of the input data that is to be used to evaluate the Model. - */ - testFraction?: number | null; - /** - * The fraction of the input data that is to be used to train the Model. - */ - trainingFraction?: number | null; - /** - * The fraction of the input data that is to be used to validate the Model. - */ - validationFraction?: number | null; - } - /** - * Request message for PredictionService.StreamingPredict. The first message must contain endpoint field and optionally input. The subsequent messages must contain input. - */ - export interface Schema$GoogleCloudAiplatformV1StreamingPredictRequest { - /** - * The prediction input. - */ - inputs?: Schema$GoogleCloudAiplatformV1Tensor[]; - /** - * The parameters that govern the prediction. - */ - parameters?: Schema$GoogleCloudAiplatformV1Tensor; - } - /** - * Response message for PredictionService.StreamingPredict. - */ - export interface Schema$GoogleCloudAiplatformV1StreamingPredictResponse { - /** - * The prediction output. - */ - outputs?: Schema$GoogleCloudAiplatformV1Tensor[]; - /** - * The parameters that govern the prediction. - */ - parameters?: Schema$GoogleCloudAiplatformV1Tensor; - } - /** - * Request message for FeaturestoreOnlineServingService.StreamingFeatureValuesRead. - */ - export interface Schema$GoogleCloudAiplatformV1StreamingReadFeatureValuesRequest { - /** - * Required. IDs of entities to read Feature values of. The maximum number of IDs is 100. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. - */ - entityIds?: string[] | null; - /** - * Required. Selector choosing Features of the target EntityType. Feature IDs will be deduplicated. 
- */ - featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; - } - /** - * Request message for PredictionService.StreamRawPredict. - */ - export interface Schema$GoogleCloudAiplatformV1StreamRawPredictRequest { - /** - * The prediction input. Supports HTTP headers and arbitrary data payload. - */ - httpBody?: Schema$GoogleApiHttpBody; - } - /** - * A list of string values. - */ - export interface Schema$GoogleCloudAiplatformV1StringArray { - /** - * A list of string values. - */ - values?: string[] | null; - } - /** - * A message representing a Study. - */ - export interface Schema$GoogleCloudAiplatformV1Study { - /** - * Output only. Time at which the study was created. - */ - createTime?: string | null; - /** - * Required. Describes the Study, default value is empty string. - */ - displayName?: string | null; - /** - * Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. - */ - inactiveReason?: string | null; - /** - * Output only. The name of a study. The study's globally unique identifier. Format: `projects/{project\}/locations/{location\}/studies/{study\}` - */ - name?: string | null; - /** - * Output only. The detailed state of a Study. - */ - state?: string | null; - /** - * Required. Configuration of the Study. - */ - studySpec?: Schema$GoogleCloudAiplatformV1StudySpec; - } - /** - * Represents specification of a Study. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpec { - /** - * The search algorithm specified for the Study. - */ - algorithm?: string | null; - /** - * The automated early stopping spec using convex stopping rule. - */ - convexAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec; - /** - * The automated early stopping spec using decay curve rule. 
- */ - decayCurveStoppingSpec?: Schema$GoogleCloudAiplatformV1StudySpecDecayCurveAutomatedStoppingSpec; - /** - * Describe which measurement selection type will be used - */ - measurementSelectionType?: string | null; - /** - * The automated early stopping spec using median rule. - */ - medianAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1StudySpecMedianAutomatedStoppingSpec; - /** - * Required. Metric specs for the Study. - */ - metrics?: Schema$GoogleCloudAiplatformV1StudySpecMetricSpec[]; - /** - * The observation noise level of the study. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - observationNoise?: string | null; - /** - * Required. The set of parameters to tune. - */ - parameters?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpec[]; - /** - * Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. - */ - studyStoppingConfig?: Schema$GoogleCloudAiplatformV1StudySpecStudyStoppingConfig; - } - /** - * Configuration for ConvexAutomatedStoppingSpec. When there are enough completed trials (configured by min_measurement_count), for pending trials with enough measurements and steps, the policy first computes an overestimate of the objective value at max_num_steps according to the slope of the incomplete objective value curve. No prediction can be made if the curve is completely flat. If the overestimation is worse than the best objective value of the completed trials, this pending trial will be early-stopped, but a last measurement will be added to the pending trial with max_num_steps and predicted objective value from the autoregression model. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec { - /** - * The hyper-parameter name used in the tuning job that stands for learning rate. Leave it blank if learning rate is not in a parameter in tuning. 
The learning_rate is used to estimate the objective value of the ongoing trial. - */ - learningRateParameterName?: string | null; - /** - * Steps used in predicting the final objective for early stopped trials. In general, it's set to be the same as the defined steps in training / tuning. If not defined, it will learn it from the completed trials. When use_steps is false, this field is set to the maximum elapsed seconds. - */ - maxStepCount?: string | null; - /** - * The minimal number of measurements in a Trial. Early-stopping checks will not trigger if less than min_measurement_count+1 completed trials or pending trials with less than min_measurement_count measurements. If not defined, the default value is 5. - */ - minMeasurementCount?: string | null; - /** - * Minimum number of steps for a trial to complete. Trials which do not have a measurement with step_count \> min_step_count won't be considered for early stopping. It's ok to set it to 0, and a trial can be early stopped at any stage. By default, min_step_count is set to be one-tenth of the max_step_count. When use_elapsed_duration is true, this field is set to the minimum elapsed seconds. - */ - minStepCount?: string | null; - /** - * ConvexAutomatedStoppingSpec by default only updates the trials that needs to be early stopped using a newly trained auto-regressive model. When this flag is set to True, all stopped trials from the beginning are potentially updated in terms of their `final_measurement`. Also, note that the training logic of autoregressive models is different in this case. Enabling this option has shown better results and this may be the default option in the future. - */ - updateAllStoppedTrials?: boolean | null; - /** - * This bool determines whether or not the rule is applied based on elapsed_secs or steps. If use_elapsed_duration==false, the early stopping decision is made according to the predicted objective values according to the target steps. 
If use_elapsed_duration==true, elapsed_secs is used instead of steps. Also, in this case, the parameters max_num_steps and min_num_steps are overloaded to contain max_elapsed_seconds and min_elapsed_seconds. - */ - useElapsedDuration?: boolean | null; - } - /** - * The decay curve automated stopping rule builds a Gaussian Process Regressor to predict the final objective value of a Trial based on the already completed Trials and the intermediate measurements of the current Trial. Early stopping is requested for the current Trial if there is very low probability to exceed the optimal value found so far. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecDecayCurveAutomatedStoppingSpec { - /** - * True if Measurement.elapsed_duration is used as the x-axis of each Trials Decay Curve. Otherwise, Measurement.step_count will be used as the x-axis. - */ - useElapsedDuration?: boolean | null; - } - /** - * The median automated stopping rule stops a pending Trial if the Trial's best objective_value is strictly below the median 'performance' of all completed Trials reported up to the Trial's last measurement. Currently, 'performance' refers to the running average of the objective values reported by the Trial in each measurement. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecMedianAutomatedStoppingSpec { - /** - * True if median automated stopping rule applies on Measurement.elapsed_duration. It means that elapsed_duration field of latest measurement of current Trial is used to compute median objective value for each completed Trials. - */ - useElapsedDuration?: boolean | null; - } - /** - * Represents a metric to optimize. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecMetricSpec { - /** - * Required. The optimization goal of the metric. - */ - goal?: string | null; - /** - * Required. The ID of the metric. Must not contain whitespaces and must be unique amongst all MetricSpecs. 
- */ - metricId?: string | null; - /** - * Used for safe search. In the case, the metric will be a safety metric. You must provide a separate metric for objective metric. - */ - safetyConfig?: Schema$GoogleCloudAiplatformV1StudySpecMetricSpecSafetyMetricConfig; - } - /** - * Used in safe optimization to specify threshold levels and risk tolerance. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecMetricSpecSafetyMetricConfig { - /** - * Desired minimum fraction of safe trials (over total number of trials) that should be targeted by the algorithm at any time during the study (best effort). This should be between 0.0 and 1.0 and a value of 0.0 means that there is no minimum and an algorithm proceeds without targeting any specific fraction. A value of 1.0 means that the algorithm attempts to only Suggest safe Trials. - */ - desiredMinSafeTrialsFraction?: number | null; - /** - * Safety threshold (boundary value between safe and unsafe). NOTE that if you leave SafetyMetricConfig unset, a default value of 0 will be used. - */ - safetyThreshold?: number | null; - } - /** - * Represents a single parameter to optimize. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpec { - /** - * The value spec for a 'CATEGORICAL' parameter. - */ - categoricalValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecCategoricalValueSpec; - /** - * A conditional parameter node is active if the parameter's value matches the conditional node's parent_value_condition. If two items in conditional_parameter_specs have the same name, they must have disjoint parent_value_condition. - */ - conditionalParameterSpecs?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec[]; - /** - * The value spec for a 'DISCRETE' parameter. - */ - discreteValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDiscreteValueSpec; - /** - * The value spec for a 'DOUBLE' parameter. 
- */ - doubleValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDoubleValueSpec; - /** - * The value spec for an 'INTEGER' parameter. - */ - integerValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec; - /** - * Required. The ID of the parameter. Must not contain whitespaces and must be unique amongst all ParameterSpecs. - */ - parameterId?: string | null; - /** - * How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. - */ - scaleType?: string | null; - } - /** - * Value specification for a parameter in `CATEGORICAL` type. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecCategoricalValueSpec { - /** - * A default value for a `CATEGORICAL` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: string | null; - /** - * Required. The list of possible categories. - */ - values?: string[] | null; - } - /** - * Represents a parameter spec with condition from its parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec { - /** - * Required. The spec for a conditional parameter. - */ - parameterSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpec; - /** - * The spec for matching values from a parent parameter of `CATEGORICAL` type. - */ - parentCategoricalValues?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition; - /** - * The spec for matching values from a parent parameter of `DISCRETE` type. - */ - parentDiscreteValues?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition; - /** - * The spec for matching values from a parent parameter of `INTEGER` type. 
- */ - parentIntValues?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition; - } - /** - * Represents the spec to match categorical values from parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition { - /** - * Required. Matches values of the parent parameter of 'CATEGORICAL' type. All values must exist in `categorical_value_spec` of parent parameter. - */ - values?: string[] | null; - } - /** - * Represents the spec to match discrete values from parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition { - /** - * Required. Matches values of the parent parameter of 'DISCRETE' type. All values must exist in `discrete_value_spec` of parent parameter. The Epsilon of the value matching is 1e-10. - */ - values?: number[] | null; - } - /** - * Represents the spec to match integer values from parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { - /** - * Required. Matches values of the parent parameter of 'INTEGER' type. All values must lie in `integer_value_spec` of parent parameter. - */ - values?: string[] | null; - } - /** - * Value specification for a parameter in `DISCRETE` type. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDiscreteValueSpec { - /** - * A default value for a `DISCRETE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. It automatically rounds to the nearest feasible discrete point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: number | null; - /** - * Required. A list of possible values. The list should be in increasing order and at least 1e-10 apart. 
For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. This list should not contain more than 1,000 values. - */ - values?: number[] | null; - } - /** - * Value specification for a parameter in `DOUBLE` type. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDoubleValueSpec { - /** - * A default value for a `DOUBLE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: number | null; - /** - * Required. Inclusive maximum value of the parameter. - */ - maxValue?: number | null; - /** - * Required. Inclusive minimum value of the parameter. - */ - minValue?: number | null; - } - /** - * Value specification for a parameter in `INTEGER` type. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec { - /** - * A default value for an `INTEGER` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: string | null; - /** - * Required. Inclusive maximum value of the parameter. - */ - maxValue?: string | null; - /** - * Required. Inclusive minimum value of the parameter. - */ - minValue?: string | null; - } - /** - * The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. - */ - export interface Schema$GoogleCloudAiplatformV1StudySpecStudyStoppingConfig { - /** - * If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. 
- */ - maxDurationNoProgress?: string | null; - /** - * If the specified time or duration has passed, stop the study. - */ - maximumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1StudyTimeConstraint; - /** - * If there are more than this many trials, stop the study. - */ - maxNumTrials?: number | null; - /** - * If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. - */ - maxNumTrialsNoProgress?: number | null; - /** - * Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. - */ - minimumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1StudyTimeConstraint; - /** - * If there are fewer than this many COMPLETED trials, do not stop the study. 
- */ - minNumTrials?: number | null; - /** - * If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). - */ - shouldStopAsap?: boolean | null; - } - /** - * Time-based Constraint for Study - */ - export interface Schema$GoogleCloudAiplatformV1StudyTimeConstraint { - /** - * Compares the wallclock time to this time. Must use UTC timezone. - */ - endTime?: string | null; - /** - * Counts the wallclock time passed since the creation of this Study. - */ - maxDuration?: string | null; - } - /** - * Details of operations that perform Trials suggestion. - */ - export interface Schema$GoogleCloudAiplatformV1SuggestTrialsMetadata { - /** - * The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. - */ - clientId?: string | null; - /** - * Operation metadata for suggesting Trials. - */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for VizierService.SuggestTrials. - */ - export interface Schema$GoogleCloudAiplatformV1SuggestTrialsRequest { - /** - * Required. The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. - */ - clientId?: string | null; - /** - * Optional. This allows you to specify the "context" for a Trial; a context is a slice (a subspace) of the search space. 
Typical uses for contexts: 1) You are using Vizier to tune a server for best performance, but there's a strong weekly cycle. The context specifies the day-of-week. This allows Tuesday to generalize from Wednesday without assuming that everything is identical. 2) Imagine you're optimizing some medical treatment for people. As they walk in the door, you know certain facts about them (e.g. sex, weight, height, blood-pressure). Put that information in the context, and Vizier will adapt its suggestions to the patient. 3) You want to do a fair A/B test efficiently. Specify the "A" and "B" conditions as contexts, and Vizier will generalize between "A" and "B" conditions. If they are similar, this will allow Vizier to converge to the optimum faster than if "A" and "B" were separate Studies. NOTE: You can also enter contexts as REQUESTED Trials, e.g. via the CreateTrial() RPC; that's the asynchronous option where you don't need a close association between contexts and suggestions. NOTE: All the Parameters you set in a context MUST be defined in the Study. NOTE: You must supply 0 or $suggestion_count contexts. If you don't supply any contexts, Vizier will make suggestions from the full search space specified in the StudySpec; if you supply a full set of context, each suggestion will match the corresponding context. NOTE: A Context with no features set matches anything, and allows suggestions from the full search space. NOTE: Contexts MUST lie within the search space specified in the StudySpec. It's an error if they don't. NOTE: Contexts preferentially match ACTIVE then REQUESTED trials before new suggestions are generated. NOTE: Generation of suggestions involves a match between a Context and (optionally) a REQUESTED trial; if that match is not fully specified, a suggestion will be geneated in the merged subspace. - */ - contexts?: Schema$GoogleCloudAiplatformV1TrialContext[]; - /** - * Required. The number of suggestions requested. It must be positive. 
- */ - suggestionCount?: number | null; - } - /** - * Response message for VizierService.SuggestTrials. - */ - export interface Schema$GoogleCloudAiplatformV1SuggestTrialsResponse { - /** - * The time at which operation processing completed. - */ - endTime?: string | null; - /** - * The time at which the operation was started. - */ - startTime?: string | null; - /** - * The state of the Study. - */ - studyState?: string | null; - /** - * A list of Trials. - */ - trials?: Schema$GoogleCloudAiplatformV1Trial[]; - } - /** - * Hyperparameters for SFT. - */ - export interface Schema$GoogleCloudAiplatformV1SupervisedHyperParameters { - /** - * Optional. Adapter size for tuning. - */ - adapterSize?: string | null; - /** - * Optional. Number of complete passes the model makes over the entire training dataset during training. - */ - epochCount?: string | null; - /** - * Optional. Multiplier for adjusting the default learning rate. - */ - learningRateMultiplier?: number | null; - } - /** - * Dataset distribution for Supervised Tuning. - */ - export interface Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution { - /** - * Output only. Defines the histogram bucket. - */ - buckets?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistributionDatasetBucket[]; - /** - * Output only. The maximum of the population values. - */ - max?: number | null; - /** - * Output only. The arithmetic mean of the values in the population. - */ - mean?: number | null; - /** - * Output only. The median of the values in the population. - */ - median?: number | null; - /** - * Output only. The minimum of the population values. - */ - min?: number | null; - /** - * Output only. The 5th percentile of the values in the population. - */ - p5?: number | null; - /** - * Output only. The 95th percentile of the values in the population. - */ - p95?: number | null; - /** - * Output only. Sum of a given population of values. 
- */ - sum?: string | null; - } - /** - * Dataset bucket used to create a histogram for the distribution given a population of values. - */ - export interface Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistributionDatasetBucket { - /** - * Output only. Number of values in the bucket. - */ - count?: number | null; - /** - * Output only. Left bound of the bucket. - */ - left?: number | null; - /** - * Output only. Right bound of the bucket. - */ - right?: number | null; - } - /** - * Tuning data statistics for Supervised Tuning. - */ - export interface Schema$GoogleCloudAiplatformV1SupervisedTuningDataStats { - /** - * Output only. Number of billable characters in the tuning dataset. - */ - totalBillableCharacterCount?: string | null; - /** - * Output only. Number of tuning characters in the tuning dataset. - */ - totalTuningCharacterCount?: string | null; - /** - * Output only. Number of examples in the tuning dataset. - */ - tuningDatasetExampleCount?: string | null; - /** - * Output only. Number of tuning steps for this Tuning Job. - */ - tuningStepCount?: string | null; - /** - * Output only. Sample user messages in the training dataset uri. - */ - userDatasetExamples?: Schema$GoogleCloudAiplatformV1Content[]; - /** - * Output only. Dataset distributions for the user input tokens. - */ - userInputTokenDistribution?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; - /** - * Output only. Dataset distributions for the messages per example. - */ - userMessagePerExampleDistribution?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; - /** - * Output only. Dataset distributions for the user output tokens. - */ - userOutputTokenDistribution?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; - } - /** - * Tuning Spec for Supervised Tuning. - */ - export interface Schema$GoogleCloudAiplatformV1SupervisedTuningSpec { - /** - * Optional. Hyperparameters for SFT. 
- */ - hyperParameters?: Schema$GoogleCloudAiplatformV1SupervisedHyperParameters; - /** - * Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. - */ - trainingDatasetUri?: string | null; - /** - * Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. - */ - validationDatasetUri?: string | null; - } - /** - * Request message for FeatureOnlineStoreAdminService.SyncFeatureView. - */ - export interface Schema$GoogleCloudAiplatformV1SyncFeatureViewRequest {} - /** - * Respose message for FeatureOnlineStoreAdminService.SyncFeatureView. - */ - export interface Schema$GoogleCloudAiplatformV1SyncFeatureViewResponse { - /** - * Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}/featureViewSyncs/{feature_view_sync\}` - */ - featureViewSync?: string | null; - } - /** - * A tensor value type. - */ - export interface Schema$GoogleCloudAiplatformV1Tensor { - /** - * Type specific representations that make it easy to create tensor protos in all languages. Only the representation corresponding to "dtype" can be set. The values hold the flattened representation of the tensor in row major order. BOOL - */ - boolVal?: boolean[] | null; - /** - * STRING - */ - bytesVal?: string[] | null; - /** - * DOUBLE - */ - doubleVal?: number[] | null; - /** - * The data type of tensor. - */ - dtype?: string | null; - /** - * FLOAT - */ - floatVal?: number[] | null; - /** - * INT64 - */ - int64Val?: string[] | null; - /** - * INT_8 INT_16 INT_32 - */ - intVal?: number[] | null; - /** - * A list of tensor values. - */ - listVal?: Schema$GoogleCloudAiplatformV1Tensor[]; - /** - * Shape of the tensor. - */ - shape?: string[] | null; - /** - * STRING - */ - stringVal?: string[] | null; - /** - * A map of string to tensor. 
- */ - structVal?: {[key: string]: Schema$GoogleCloudAiplatformV1Tensor} | null; - /** - * Serialized raw tensor content. - */ - tensorVal?: string | null; - /** - * UINT64 - */ - uint64Val?: string[] | null; - /** - * UINT8 UINT16 UINT32 - */ - uintVal?: number[] | null; - } - /** - * Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a Google Cloud project. If needed users can also create extra Tensorboards in their projects. - */ - export interface Schema$GoogleCloudAiplatformV1Tensorboard { - /** - * Output only. Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. Does not end with a '/'. - */ - blobStoragePathPrefix?: string | null; - /** - * Output only. Timestamp when this Tensorboard was created. - */ - createTime?: string | null; - /** - * Description of this Tensorboard. - */ - description?: string | null; - /** - * Required. User provided name of this Tensorboard. - */ - displayName?: string | null; - /** - * Customer-managed encryption key spec for a Tensorboard. If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key. - */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; - /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * Used to indicate if the TensorBoard instance is the default one. Each project & region can have at most one default TensorBoard instance. Creation of a default TensorBoard instance and updating an existing TensorBoard instance to be default will mark all other TensorBoard instances (if any) as non default. - */ - isDefault?: boolean | null; - /** - * The labels with user-defined metadata to organize your Tensorboards. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Tensorboard (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. - */ - labels?: {[key: string]: string} | null; - /** - * Output only. Name of the Tensorboard. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` - */ - name?: string | null; - /** - * Output only. The number of Runs stored in this Tensorboard. - */ - runCount?: number | null; - /** - * Output only. Timestamp when this Tensorboard was last updated. - */ - updateTime?: string | null; - } - /** - * One blob (e.g, image, graph) viewable on a blob metric plot. - */ - export interface Schema$GoogleCloudAiplatformV1TensorboardBlob { - /** - * Optional. The bytes of the blob is not present unless it's returned by the ReadTensorboardBlobData endpoint. - */ - data?: string | null; - /** - * Output only. A URI safe key uniquely identifying a blob. Can be used to locate the blob stored in the Cloud Storage bucket of the consumer project. - */ - id?: string | null; - } - /** - * One point viewable on a blob metric plot, but mostly just a wrapper message to work around repeated fields can't be used directly within `oneof` fields. - */ - export interface Schema$GoogleCloudAiplatformV1TensorboardBlobSequence { - /** - * List of blobs contained within the sequence. - */ - values?: Schema$GoogleCloudAiplatformV1TensorboardBlob[]; - } - /** - * A TensorboardExperiment is a group of TensorboardRuns, that are typically the results of a training job run, in a Tensorboard. - */ - export interface Schema$GoogleCloudAiplatformV1TensorboardExperiment { - /** - * Output only. 
Timestamp when this TensorboardExperiment was created. - */ - createTime?: string | null; - /** - * Description of this TensorboardExperiment. - */ - description?: string | null; - /** - * User provided name of this TensorboardExperiment. - */ - displayName?: string | null; - /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * The labels with user-defined metadata to organize your TensorboardExperiment. Label keys and values cannot be longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with `aiplatform.googleapis.com/` and are immutable. The following system labels exist for each Dataset: * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value is the metadata_schema's title. - */ - labels?: {[key: string]: string} | null; - /** - * Output only. Name of the TensorboardExperiment. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` - */ - name?: string | null; - /** - * Immutable. Source of the TensorboardExperiment. Example: a custom training job. - */ - source?: string | null; - /** - * Output only. Timestamp when this TensorboardExperiment was last updated. - */ - updateTime?: string | null; - } - /** - * TensorboardRun maps to a specific execution of a training job with a given set of hyperparameter values, model definition, dataset, etc - */ - export interface Schema$GoogleCloudAiplatformV1TensorboardRun { - /** - * Output only. Timestamp when this TensorboardRun was created. - */ - createTime?: string | null; - /** - * Description of this TensorboardRun. 
- */ - description?: string | null; - /** - * Required. User provided name of this TensorboardRun. This value must be unique among all TensorboardRuns belonging to the same parent TensorboardExperiment. - */ - displayName?: string | null; - /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * The labels with user-defined metadata to organize your TensorboardRuns. This field will be used to filter and visualize Runs in the Tensorboard UI. For example, a Vertex AI training job can set a label aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created within that job. An end user can set a label experiment_id=xxxxx for all the runs produced in a Jupyter notebook. These runs can be grouped by a label value and visualized together in the Tensorboard UI. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one TensorboardRun (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. - */ - labels?: {[key: string]: string} | null; - /** - * Output only. Name of the TensorboardRun. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` - */ - name?: string | null; - /** - * Output only. Timestamp when this TensorboardRun was last updated. - */ - updateTime?: string | null; - } - /** - * One point viewable on a tensor metric plot. - */ - export interface Schema$GoogleCloudAiplatformV1TensorboardTensor { - /** - * Required. 
Serialized form of https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto - */ - value?: string | null; - /** - * Optional. Version number of TensorProto used to serialize value. - */ - versionNumber?: number | null; - } - /** - * TensorboardTimeSeries maps to times series produced in training runs - */ - export interface Schema$GoogleCloudAiplatformV1TensorboardTimeSeries { - /** - * Output only. Timestamp when this TensorboardTimeSeries was created. - */ - createTime?: string | null; - /** - * Description of this TensorboardTimeSeries. - */ - description?: string | null; + } + /** + * Config that contains the strategy used to generate sliding windows in time series training. A window is a series of rows that comprise the context up to the time of prediction, and the horizon following. The corresponding row for each window marks the start of the forecast horizon. Each window is used as an input example for training/evaluation. + */ + export interface Schema$GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig { /** - * Required. User provided name of this TensorboardTimeSeries. This value should be unique among all TensorboardTimeSeries resources belonging to the same TensorboardRun resource (parent resource). + * Name of the column that should be used to generate sliding windows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window with the horizon starting at that row. The column will not be used as a feature in training. */ - displayName?: string | null; + column?: string | null; /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Maximum number of windows that should be generated across all time series. */ - etag?: string | null; + maxCount?: string | null; /** - * Output only. Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries. 
+ * Stride length used to generate input examples. Within one time series, every {$STRIDE_LENGTH\} rows will be used to generate a sliding window. */ - metadata?: Schema$GoogleCloudAiplatformV1TensorboardTimeSeriesMetadata; + strideLength?: string | null; + } + /** + * A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1. + */ + export interface Schema$GoogleCloudAiplatformV1SchemaVertex { /** - * Output only. Name of the TensorboardTimeSeries. + * X coordinate. */ - name?: string | null; + x?: number | null; /** - * Data of the current plugin, with the size limited to 65KB. + * Y coordinate. */ - pluginData?: string | null; + y?: number | null; + } + /** + * Annotation details specific to video action recognition. + */ + export interface Schema$GoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation { /** - * Immutable. Name of the plugin this time series pertain to. Such as Scalar, Tensor, Blob + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - pluginName?: string | null; + annotationSpecId?: string | null; /** - * Output only. Timestamp when this TensorboardTimeSeries was last updated. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - updateTime?: string | null; + displayName?: string | null; /** - * Required. Immutable. Type of TensorboardTimeSeries value. + * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. */ - valueType?: string | null; + timeSegment?: Schema$GoogleCloudAiplatformV1SchemaTimeSegment; } /** - * Describes metadata for a TensorboardTimeSeries. + * Annotation details specific to video classification. */ - export interface Schema$GoogleCloudAiplatformV1TensorboardTimeSeriesMetadata { + export interface Schema$GoogleCloudAiplatformV1SchemaVideoClassificationAnnotation { /** - * Output only. 
The largest blob sequence length (number of blobs) of all data points in this time series, if its ValueType is BLOB_SEQUENCE. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - maxBlobSequenceLength?: string | null; + annotationSpecId?: string | null; /** - * Output only. Max step index of all data points within a TensorboardTimeSeries. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - maxStep?: string | null; + displayName?: string | null; /** - * Output only. Max wall clock timestamp of all data points within a TensorboardTimeSeries. + * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. */ - maxWallTime?: string | null; + timeSegment?: Schema$GoogleCloudAiplatformV1SchemaTimeSegment; } /** - * The storage details for TFRecord output content. + * Payload of Video DataItem. */ - export interface Schema$GoogleCloudAiplatformV1TFRecordDestination { + export interface Schema$GoogleCloudAiplatformV1SchemaVideoDataItem { /** - * Required. Google Cloud Storage location. + * Required. Google Cloud Storage URI points to the original video in user's bucket. The video is up to 50 GB in size and up to 3 hour in duration. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; + gcsUri?: string | null; + /** + * Output only. The mime type of the content of the video. Only the videos in below listed mime types are supported. Supported mime_type: - video/mp4 - video/avi - video/quicktime + */ + mimeType?: string | null; } /** - * The config for feature monitoring threshold. + * The metadata of Datasets that contain Video DataItems. */ - export interface Schema$GoogleCloudAiplatformV1ThresholdConfig { + export interface Schema$GoogleCloudAiplatformV1SchemaVideoDatasetMetadata { /** - * Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. 
For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + * Points to a YAML file stored on Google Cloud Storage describing payload of the Video DataItems that belong to this Dataset. */ - value?: number | null; + dataItemSchemaUri?: string | null; + /** + * Google Cloud Storage Bucket name that contains the blob data of this Dataset. + */ + gcsBucket?: string | null; } /** - * All the data stored in a TensorboardTimeSeries. + * Annotation details specific to video object tracking. */ - export interface Schema$GoogleCloudAiplatformV1TimeSeriesData { + export interface Schema$GoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation { /** - * Required. The ID of the TensorboardTimeSeries, which will become the final component of the TensorboardTimeSeries' resource name + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - tensorboardTimeSeriesId?: string | null; + annotationSpecId?: string | null; /** - * Required. Data points in this time series. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - values?: Schema$GoogleCloudAiplatformV1TimeSeriesDataPoint[]; + displayName?: string | null; /** - * Required. Immutable. The value type of this time series. All the values in this time series data must match this value type. + * The instance of the object, expressed as a positive integer. Used to track the same object across different frames. */ - valueType?: string | null; - } - /** - * A TensorboardTimeSeries data point. - */ - export interface Schema$GoogleCloudAiplatformV1TimeSeriesDataPoint { + instanceId?: string | null; /** - * A blob sequence value. + * A time (frame) of a video to which this annotation pertains. 
Represented as the duration since the video's start. */ - blobs?: Schema$GoogleCloudAiplatformV1TensorboardBlobSequence; + timeOffset?: string | null; /** - * A scalar value. + * The rightmost coordinate of the bounding box. */ - scalar?: Schema$GoogleCloudAiplatformV1Scalar; + xMax?: number | null; /** - * Step index of this data point within the run. + * The leftmost coordinate of the bounding box. */ - step?: string | null; + xMin?: number | null; /** - * A tensor value. + * The bottommost coordinate of the bounding box. */ - tensor?: Schema$GoogleCloudAiplatformV1TensorboardTensor; + yMax?: number | null; /** - * Wall clock timestamp when this data point is generated by the end user. + * The topmost coordinate of the bounding box. */ - wallTime?: string | null; + yMin?: number | null; } + export interface Schema$GoogleCloudAiplatformV1SchemaVisualInspectionClassificationLabelSavedQueryMetadata { + /** + * Whether or not the classification label is multi_label. + */ + multiLabel?: boolean | null; + } + export interface Schema$GoogleCloudAiplatformV1SchemaVisualInspectionMaskSavedQueryMetadata {} /** - * Assigns input data to training, validation, and test sets based on a provided timestamps. The youngest data pieces are assigned to training set, next to validation set, and the oldest to the test set. Supported only for tabular Datasets. + * Response message for DatasetService.SearchDataItems. */ - export interface Schema$GoogleCloudAiplatformV1TimestampSplit { + export interface Schema$GoogleCloudAiplatformV1SearchDataItemsResponse { /** - * Required. The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. + * The DataItemViews read. 
*/ - key?: string | null; + dataItemViews?: Schema$GoogleCloudAiplatformV1DataItemView[]; /** - * The fraction of the input data that is to be used to evaluate the Model. + * A token to retrieve next page of results. Pass to SearchDataItemsRequest.page_token to obtain that page. */ - testFraction?: number | null; + nextPageToken?: string | null; + } + /** + * Google search entry point. + */ + export interface Schema$GoogleCloudAiplatformV1SearchEntryPoint { /** - * The fraction of the input data that is to be used to train the Model. + * Optional. Web content snippet that can be embedded in a web page or an app webview. */ - trainingFraction?: number | null; + renderedContent?: string | null; /** - * The fraction of the input data that is to be used to validate the Model. + * Optional. Base64 encoded JSON representing array of tuple. */ - validationFraction?: number | null; + sdkBlob?: string | null; } /** - * Tokens info with a list of tokens and the corresponding list of token ids. + * Response message for FeaturestoreService.SearchFeatures. */ - export interface Schema$GoogleCloudAiplatformV1TokensInfo { + export interface Schema$GoogleCloudAiplatformV1SearchFeaturesResponse { /** - * A list of token ids from the input. + * The Features matching the request. Fields returned: * `name` * `description` * `labels` * `create_time` * `update_time` */ - tokenIds?: string[] | null; + features?: Schema$GoogleCloudAiplatformV1Feature[]; /** - * A list of tokens from the input. + * A token, which can be sent as SearchFeaturesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - tokens?: string[] | null; + nextPageToken?: string | null; } /** - * Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. 
A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + * Request message for MigrationService.SearchMigratableResources. */ - export interface Schema$GoogleCloudAiplatformV1Tool { + export interface Schema$GoogleCloudAiplatformV1SearchMigratableResourcesRequest { /** - * Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. + * A filter for your search. You can use the following types of filters: * Resource type filters. The following strings filter for a specific type of MigratableResource: * `ml_engine_model_version:*` * `automl_model:*` * `automl_dataset:*` * `data_labeling_dataset:*` * "Migrated or not" filters. The following strings filter for resources that either have or have not already been migrated: * `last_migrate_time:*` filters for migrated resources. * `NOT last_migrate_time:*` filters for not yet migrated resources. */ - functionDeclarations?: Schema$GoogleCloudAiplatformV1FunctionDeclaration[]; + filter?: string | null; /** - * Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. + * The standard page size. The default and maximum value is 100. */ - retrieval?: Schema$GoogleCloudAiplatformV1Retrieval; + pageSize?: number | null; + /** + * The standard page token. + */ + pageToken?: string | null; } /** - * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. 
The trained model will be used by data sampling algorithm to select DataItems. + * Response message for MigrationService.SearchMigratableResources. */ - export interface Schema$GoogleCloudAiplatformV1TrainingConfig { + export interface Schema$GoogleCloudAiplatformV1SearchMigratableResourcesResponse { /** - * The timeout hours for the CMLE training job, expressed in milli hours i.e. 1,000 value in this field means 1 hour. + * All migratable resources that can be migrated to the location specified in the request. */ - timeoutTrainingMilliHours?: string | null; + migratableResources?: Schema$GoogleCloudAiplatformV1MigratableResource[]; + /** + * The standard next-page token. The migratable_resources may not fill page_size in SearchMigratableResourcesRequest even when there are subsequent pages. + */ + nextPageToken?: string | null; } /** - * The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, upload the Model to Vertex AI, and evaluate the Model. + * Request message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. */ - export interface Schema$GoogleCloudAiplatformV1TrainingPipeline { + export interface Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest { /** - * Output only. Time when the TrainingPipeline was created. + * Required. The DeployedModel ID of the [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. */ - createTime?: string | null; + deployedModelId?: string | null; /** - * Required. The user-defined name of this TrainingPipeline. + * The latest timestamp of stats being generated. If not set, indicates fetching stats till the latest possible one. */ - displayName?: string | null; + endTime?: string | null; /** - * Customer-managed encryption key spec for a TrainingPipeline. If set, this TrainingPipeline will be secured by this key. 
Note: Model trained by this TrainingPipeline is also secured by this key if model_to_upload is not set separately. + * The feature display name. If specified, only return the stats belonging to this feature. Format: ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name, example: "user_destination". */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + featureDisplayName?: string | null; /** - * Output only. Time when the TrainingPipeline entered any of the following states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, `PIPELINE_STATE_CANCELLED`. + * Required. Objectives of the stats to retrieve. */ - endTime?: string | null; + objectives?: Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective[]; /** - * Output only. Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or `PIPELINE_STATE_CANCELLED`. + * The standard list page size. */ - error?: Schema$GoogleRpcStatus; + pageSize?: number | null; /** - * Specifies Vertex AI owned input data that may be used for training the Model. The TrainingPipeline's training_task_definition should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the training_task_definition, then it should be assumed that the TrainingPipeline does not depend on this configuration. + * A page token received from a previous JobService.SearchModelDeploymentMonitoringStatsAnomalies call. */ - inputDataConfig?: Schema$GoogleCloudAiplatformV1InputDataConfig; + pageToken?: string | null; /** - * The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. 
+ * The earliest timestamp of stats being generated. If not set, indicates fetching stats till the earliest possible one. */ - labels?: {[key: string]: string} | null; + startTime?: string | null; + } + /** + * Stats requested for specific objective. + */ + export interface Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective { /** - * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + * If set, all attribution scores between SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time and SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time are fetched, and page token doesn't take effect in this case. Only used to retrieve attribution score for the top Features which has the highest attribution score in the latest monitoring run. */ - modelId?: string | null; + topFeatureCount?: number | null; + type?: string | null; + } + /** + * Response message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. + */ + export interface Schema$GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse { + /** + * Stats retrieved for requested objectives. There are at most 1000 ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats in the response. + */ + monitoringStats?: Schema$GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies[]; + /** + * The page token that can be used by the next JobService.SearchModelDeploymentMonitoringStatsAnomalies call. + */ + nextPageToken?: string | null; + } + /** + * The request message for FeatureOnlineStoreService.SearchNearestEntities. + */ + export interface Schema$GoogleCloudAiplatformV1SearchNearestEntitiesRequest { /** - * Describes the Model that may be uploaded (via ModelService.UploadModel) by this TrainingPipeline. 
The TrainingPipeline's training_task_definition should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the training_task_definition, then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task does not support uploading a Model as part of the pipeline. When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and the trained Model had been uploaded into Vertex AI, then the model_to_upload's resource name is populated. The Model is always uploaded into the Project and Location in which this pipeline is. + * Required. The query. */ - modelToUpload?: Schema$GoogleCloudAiplatformV1Model; + query?: Schema$GoogleCloudAiplatformV1NearestNeighborQuery; /** - * Output only. Resource name of the TrainingPipeline. + * Optional. If set to true, the full entities (including all vector values and metadata) of the nearest neighbors are returned; otherwise only entity id of the nearest neighbors will be returned. Note that returning full entities will significantly increase the latency and cost of the query. */ - name?: string | null; + returnFullEntity?: boolean | null; + } + /** + * Response message for FeatureOnlineStoreService.SearchNearestEntities + */ + export interface Schema$GoogleCloudAiplatformV1SearchNearestEntitiesResponse { /** - * Optional. When specify this field, the `model_to_upload` will not be uploaded as a new model, instead, it will become a new version of this `parent_model`. + * The nearest neighbors of the query entity. */ - parentModel?: string | null; + nearestNeighbors?: Schema$GoogleCloudAiplatformV1NearestNeighbors; + } + /** + * Configuration for the use of custom service account to run the workloads. + */ + export interface Schema$GoogleCloudAiplatformV1ServiceAccountSpec { /** - * Output only. 
Time when the TrainingPipeline for the first time entered the `PIPELINE_STATE_RUNNING` state. + * Required. If true, custom user-managed service account is enforced to run any workloads (for example, Vertex Jobs) on the resource. Otherwise, uses the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). */ - startTime?: string | null; + enableCustomServiceAccount?: boolean | null; /** - * Output only. The detailed state of the pipeline. + * Optional. Required when all below conditions are met * `enable_custom_service_account` is true; * any runtime is specified via `ResourceRuntimeSpec` on creation time, for example, Ray The users must have `iam.serviceAccounts.actAs` permission on this service account and then the specified runtime containers will run as it. Do not set this field if you want to submit jobs using custom service account to this PersistentResource after creation, but only specify the `service_account` inside the job. */ - state?: string | null; + serviceAccount?: string | null; + } + /** + * A set of Shielded Instance options. See [Images using supported Shielded VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). + */ + export interface Schema$GoogleCloudAiplatformV1ShieldedVmConfig { /** - * Required. A Google Cloud Storage path to the YAML file that defines the training task which is responsible for producing the model artifact, and may also include additional auxiliary work. The definition files that can be used here are found in gs://google-cloud-aiplatform/schema/trainingjob/definition/. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * Defines whether the instance has [Secure Boot](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot) enabled. 
Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. */ - trainingTaskDefinition?: string | null; + enableSecureBoot?: boolean | null; + } + /** + * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + */ + export interface Schema$GoogleCloudAiplatformV1SmoothGradConfig { /** - * Required. The training task's parameter(s), as specified in the training_task_definition's `inputs`. + * This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features. */ - trainingTaskInputs?: any | null; + featureNoiseSigma?: Schema$GoogleCloudAiplatformV1FeatureNoiseSigma; /** - * Output only. The metadata information as specified in the training_task_definition's `metadata`. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's training_task_definition contains `metadata` object. + * This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). 
For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. */ - trainingTaskMetadata?: any | null; + noiseSigma?: number | null; /** - * Output only. Time when the TrainingPipeline was most recently updated. + * The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. */ - updateTime?: string | null; + noisySampleCount?: number | null; } /** - * A message representing a Trial. A Trial contains a unique set of Parameters that has been or will be evaluated, along with the objective metrics got by running the Trial. + * SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist managers and workers. Managers are responsible for managing the workers in this pool as well as customers' data labeling jobs associated with this pool. Customers create specialist pool as well as start data labeling jobs on Cloud, managers and workers handle the jobs using CrowdCompute console. */ - export interface Schema$GoogleCloudAiplatformV1Trial { + export interface Schema$GoogleCloudAiplatformV1SpecialistPool { /** - * Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique client_id. When a client asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client should evaluate the Trial, complete it, and report back to Vertex AI Vizier. If suggestion is asked again by same client_id before the Trial is completed, the same Trial will be returned. 
Multiple clients with different client_ids can ask for suggestions simultaneously, each of them will get their own Trial. + * Required. The user-defined name of the SpecialistPool. The name can be up to 128 characters long and can consist of any UTF-8 characters. This field should be unique on project-level. */ - clientId?: string | null; + displayName?: string | null; /** - * Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. + * Required. The resource name of the SpecialistPool. */ - customJob?: string | null; + name?: string | null; /** - * Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`. + * Output only. The resource name of the pending data labeling jobs. */ - endTime?: string | null; + pendingDataLabelingJobs?: string[] | null; /** - * Output only. The final measurement containing the objective value. + * The email addresses of the managers in the SpecialistPool. */ - finalMeasurement?: Schema$GoogleCloudAiplatformV1Measurement; + specialistManagerEmails?: string[] | null; /** - * Output only. The identifier of the Trial assigned by the service. + * Output only. The number of managers in this SpecialistPool. */ - id?: string | null; + specialistManagersCount?: number | null; /** - * Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is `INFEASIBLE`. + * The email addresses of workers in the SpecialistPool. */ - infeasibleReason?: string | null; + specialistWorkerEmails?: string[] | null; + } + /** + * Metadata information for NotebookService.StartNotebookRuntime. + */ + export interface Schema$GoogleCloudAiplatformV1StartNotebookRuntimeOperationMetadata { /** - * Output only. A list of measurements that are strictly lexicographically ordered by their induced tuples (steps, elapsed_duration). These are used for early stopping computations. + * The operation generic information. 
*/ - measurements?: Schema$GoogleCloudAiplatformV1Measurement[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Output only. Resource name of the Trial assigned by the service. + * A human-readable message that shows the intermediate progress details of NotebookRuntime. */ - name?: string | null; + progressMessage?: string | null; + } + /** + * Request message for NotebookService.StartNotebookRuntime. + */ + export interface Schema$GoogleCloudAiplatformV1StartNotebookRuntimeRequest {} + /** + * Request message for VizierService.StopTrial. + */ + export interface Schema$GoogleCloudAiplatformV1StopTrialRequest {} + /** + * Assigns input data to the training, validation, and test sets so that the distribution of values found in the categorical column (as specified by the `key` field) is mirrored within each split. The fraction values determine the relative sizes of the splits. For example, if the specified column has three values, with 50% of the rows having value "A", 25% value "B", and 25% value "C", and the split fractions are specified as 80/10/10, then the training set will constitute 80% of the training data, with about 50% of the training set rows having the value "A" for the specified column, about 25% having the value "B", and about 25% having the value "C". Only the top 500 occurring values are used; any values not in the top 500 values are randomly assigned to a split. If less than three rows contain a specific value, those rows are randomly assigned. Supported only for tabular Datasets. + */ + export interface Schema$GoogleCloudAiplatformV1StratifiedSplit { /** - * Output only. The parameters of the Trial. + * Required. The key is a name of one of the Dataset's data columns. The key provided must be for a categorical column. */ - parameters?: Schema$GoogleCloudAiplatformV1TrialParameter[]; + key?: string | null; /** - * Output only. Time when the Trial was started. 
+ * The fraction of the input data that is to be used to evaluate the Model. */ - startTime?: string | null; + testFraction?: number | null; /** - * Output only. The detailed state of the Trial. + * The fraction of the input data that is to be used to train the Model. */ - state?: string | null; + trainingFraction?: number | null; /** - * Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if this trial is part of a HyperparameterTuningJob and the job's trial_job_spec.enable_web_access field is `true`. The keys are names of each node used for the trial; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + * The fraction of the input data that is to be used to validate the Model. */ - webAccessUris?: {[key: string]: string} | null; + validationFraction?: number | null; } /** - * Next ID: 3 + * Request message for PredictionService.StreamingPredict. The first message must contain endpoint field and optionally input. The subsequent messages must contain input. */ - export interface Schema$GoogleCloudAiplatformV1TrialContext { + export interface Schema$GoogleCloudAiplatformV1StreamingPredictRequest { /** - * A human-readable field which can store a description of this context. This will become part of the resulting Trial's description field. + * The prediction input. */ - description?: string | null; + inputs?: Schema$GoogleCloudAiplatformV1Tensor[]; /** - * If/when a Trial is generated or selected from this Context, its Parameters will match any parameters specified here. (I.e. if this context specifies parameter name:'a' int_value:3, then a resulting Trial will have int_value:3 for its parameter named 'a'.) 
Note that we first attempt to match existing REQUESTED Trials with contexts, and if there are no matches, we generate suggestions in the subspace defined by the parameters specified here. NOTE: a Context without any Parameters matches the entire feasible search space. + * The parameters that govern the prediction. */ - parameters?: Schema$GoogleCloudAiplatformV1TrialParameter[]; + parameters?: Schema$GoogleCloudAiplatformV1Tensor; } /** - * A message representing a parameter to be tuned. + * Response message for PredictionService.StreamingPredict. */ - export interface Schema$GoogleCloudAiplatformV1TrialParameter { + export interface Schema$GoogleCloudAiplatformV1StreamingPredictResponse { /** - * Output only. The ID of the parameter. The parameter should be defined in StudySpec's Parameters. + * The prediction output. */ - parameterId?: string | null; + outputs?: Schema$GoogleCloudAiplatformV1Tensor[]; /** - * Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'. + * The parameters that govern the prediction. */ - value?: any | null; + parameters?: Schema$GoogleCloudAiplatformV1Tensor; } /** - * The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. + * Request message for FeaturestoreOnlineServingService.StreamingFeatureValuesRead. */ - export interface Schema$GoogleCloudAiplatformV1TunedModel { + export interface Schema$GoogleCloudAiplatformV1StreamingReadFeatureValuesRequest { /** - * Output only. A resource name of an Endpoint. Format: `projects/{project\}/locations/{location\}/endpoints/{endpoint\}`. + * Required. IDs of entities to read Feature values of. The maximum number of IDs is 100. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. 
*/ - endpoint?: string | null; + entityIds?: string[] | null; /** - * Output only. The resource name of the TunedModel. Format: `projects/{project\}/locations/{location\}/models/{model\}`. + * Required. Selector choosing Features of the target EntityType. Feature IDs will be deduplicated. */ - model?: string | null; + featureSelector?: Schema$GoogleCloudAiplatformV1FeatureSelector; } /** - * The tuning data statistic values for TuningJob. + * Request message for PredictionService.StreamRawPredict. */ - export interface Schema$GoogleCloudAiplatformV1TuningDataStats { + export interface Schema$GoogleCloudAiplatformV1StreamRawPredictRequest { /** - * The SFT Tuning data stats. + * The prediction input. Supports HTTP headers and arbitrary data payload. */ - supervisedTuningDataStats?: Schema$GoogleCloudAiplatformV1SupervisedTuningDataStats; + httpBody?: Schema$GoogleApiHttpBody; } /** - * Represents a TuningJob that runs with Google owned models. + * A list of string values. */ - export interface Schema$GoogleCloudAiplatformV1TuningJob { + export interface Schema$GoogleCloudAiplatformV1StringArray { /** - * The base model that is being tuned, e.g., "gemini-1.0-pro-002". + * A list of string values. */ - baseModel?: string | null; + values?: string[] | null; + } + /** + * A message representing a Study. + */ + export interface Schema$GoogleCloudAiplatformV1Study { /** - * Output only. Time when the TuningJob was created. + * Output only. Time at which the study was created. */ createTime?: string | null; /** - * Optional. The description of the TuningJob. + * Required. Describes the Study, default value is empty string. */ - description?: string | null; + displayName?: string | null; /** - * Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key. + * Output only. A human readable reason why the Study is inactive. 
This should be empty if a study is ACTIVE or COMPLETED. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + inactiveReason?: string | null; /** - * Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. + * Output only. The name of a study. The study's globally unique identifier. Format: `projects/{project\}/locations/{location\}/studies/{study\}` */ - endTime?: string | null; + name?: string | null; /** - * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * Output only. The detailed state of a Study. */ - error?: Schema$GoogleRpcStatus; + state?: string | null; /** - * Output only. The Experiment associated with this TuningJob. + * Required. Configuration of the Study. */ - experiment?: string | null; + studySpec?: Schema$GoogleCloudAiplatformV1StudySpec; + } + /** + * Represents specification of a Study. + */ + export interface Schema$GoogleCloudAiplatformV1StudySpec { /** - * Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The search algorithm specified for the Study. */ - labels?: {[key: string]: string} | null; + algorithm?: string | null; /** - * Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project\}/locations/{location\}/tuningJobs/{tuning_job\}` + * The automated early stopping spec using convex stopping rule. */ - name?: string | null; + convexAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec; /** - * Output only. 
Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state. + * The automated early stopping spec using decay curve rule. */ - startTime?: string | null; + decayCurveStoppingSpec?: Schema$GoogleCloudAiplatformV1StudySpecDecayCurveAutomatedStoppingSpec; /** - * Output only. The detailed state of the job. + * Describe which measurement selection type will be used + */ + measurementSelectionType?: string | null; + /** + * The automated early stopping spec using median rule. + */ + medianAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1StudySpecMedianAutomatedStoppingSpec; + /** + * Required. Metric specs for the Study. + */ + metrics?: Schema$GoogleCloudAiplatformV1StudySpecMetricSpec[]; + /** + * The observation noise level of the study. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. + */ + observationNoise?: string | null; + /** + * Required. The set of parameters to tune. + */ + parameters?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpec[]; + /** + * Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + */ + studyStoppingConfig?: Schema$GoogleCloudAiplatformV1StudySpecStudyStoppingConfig; + } + /** + * Configuration for ConvexAutomatedStoppingSpec. When there are enough completed trials (configured by min_measurement_count), for pending trials with enough measurements and steps, the policy first computes an overestimate of the objective value at max_num_steps according to the slope of the incomplete objective value curve. No prediction can be made if the curve is completely flat. If the overestimation is worse than the best objective value of the completed trials, this pending trial will be early-stopped, but a last measurement will be added to the pending trial with max_num_steps and predicted objective value from the autoregression model. 
+ */ + export interface Schema$GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec { + /** + * The hyper-parameter name used in the tuning job that stands for learning rate. Leave it blank if learning rate is not in a parameter in tuning. The learning_rate is used to estimate the objective value of the ongoing trial. */ - state?: string | null; + learningRateParameterName?: string | null; /** - * Tuning Spec for Supervised Fine Tuning. + * Steps used in predicting the final objective for early stopped trials. In general, it's set to be the same as the defined steps in training / tuning. If not defined, it will learn it from the completed trials. When use_steps is false, this field is set to the maximum elapsed seconds. */ - supervisedTuningSpec?: Schema$GoogleCloudAiplatformV1SupervisedTuningSpec; + maxStepCount?: string | null; /** - * Output only. The tuned model resources assiociated with this TuningJob. + * The minimal number of measurements in a Trial. Early-stopping checks will not trigger if less than min_measurement_count+1 completed trials or pending trials with less than min_measurement_count measurements. If not defined, the default value is 5. */ - tunedModel?: Schema$GoogleCloudAiplatformV1TunedModel; + minMeasurementCount?: string | null; /** - * Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Minimum number of steps for a trial to complete. Trials which do not have a measurement with step_count \> min_step_count won't be considered for early stopping. It's ok to set it to 0, and a trial can be early stopped at any stage. By default, min_step_count is set to be one-tenth of the max_step_count. When use_elapsed_duration is true, this field is set to the minimum elapsed seconds. */ - tunedModelDisplayName?: string | null; + minStepCount?: string | null; /** - * Output only. The tuning data statistics associated with this TuningJob. 
+ * ConvexAutomatedStoppingSpec by default only updates the trials that needs to be early stopped using a newly trained auto-regressive model. When this flag is set to True, all stopped trials from the beginning are potentially updated in terms of their `final_measurement`. Also, note that the training logic of autoregressive models is different in this case. Enabling this option has shown better results and this may be the default option in the future. */ - tuningDataStats?: Schema$GoogleCloudAiplatformV1TuningDataStats; + updateAllStoppedTrials?: boolean | null; /** - * Output only. Time when the TuningJob was most recently updated. + * This bool determines whether or not the rule is applied based on elapsed_secs or steps. If use_elapsed_duration==false, the early stopping decision is made according to the predicted objective values according to the target steps. If use_elapsed_duration==true, elapsed_secs is used instead of steps. Also, in this case, the parameters max_num_steps and min_num_steps are overloaded to contain max_elapsed_seconds and min_elapsed_seconds. */ - updateTime?: string | null; + useElapsedDuration?: boolean | null; } /** - * Runtime operation information for IndexEndpointService.UndeployIndex. + * The decay curve automated stopping rule builds a Gaussian Process Regressor to predict the final objective value of a Trial based on the already completed Trials and the intermediate measurements of the current Trial. Early stopping is requested for the current Trial if there is very low probability to exceed the optimal value found so far. */ - export interface Schema$GoogleCloudAiplatformV1UndeployIndexOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecDecayCurveAutomatedStoppingSpec { /** - * The operation generic information. + * True if Measurement.elapsed_duration is used as the x-axis of each Trials Decay Curve. Otherwise, Measurement.step_count will be used as the x-axis. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + useElapsedDuration?: boolean | null; } /** - * Request message for IndexEndpointService.UndeployIndex. + * The median automated stopping rule stops a pending Trial if the Trial's best objective_value is strictly below the median 'performance' of all completed Trials reported up to the Trial's last measurement. Currently, 'performance' refers to the running average of the objective values reported by the Trial in each measurement. */ - export interface Schema$GoogleCloudAiplatformV1UndeployIndexRequest { + export interface Schema$GoogleCloudAiplatformV1StudySpecMedianAutomatedStoppingSpec { /** - * Required. The ID of the DeployedIndex to be undeployed from the IndexEndpoint. + * True if median automated stopping rule applies on Measurement.elapsed_duration. It means that elapsed_duration field of latest measurement of current Trial is used to compute median objective value for each completed Trials. */ - deployedIndexId?: string | null; + useElapsedDuration?: boolean | null; } /** - * Response message for IndexEndpointService.UndeployIndex. - */ - export interface Schema$GoogleCloudAiplatformV1UndeployIndexResponse {} - /** - * Runtime operation information for EndpointService.UndeployModel. + * Represents a metric to optimize. */ - export interface Schema$GoogleCloudAiplatformV1UndeployModelOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecMetricSpec { /** - * The operation generic information. + * Required. The optimization goal of the metric. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + goal?: string | null; + /** + * Required. The ID of the metric. Must not contain whitespaces and must be unique amongst all MetricSpecs. + */ + metricId?: string | null; + /** + * Used for safe search. In the case, the metric will be a safety metric. You must provide a separate metric for objective metric. 
+ */ + safetyConfig?: Schema$GoogleCloudAiplatformV1StudySpecMetricSpecSafetyMetricConfig; } /** - * Request message for EndpointService.UndeployModel. + * Used in safe optimization to specify threshold levels and risk tolerance. */ - export interface Schema$GoogleCloudAiplatformV1UndeployModelRequest { + export interface Schema$GoogleCloudAiplatformV1StudySpecMetricSpecSafetyMetricConfig { /** - * Required. The ID of the DeployedModel to be undeployed from the Endpoint. + * Desired minimum fraction of safe trials (over total number of trials) that should be targeted by the algorithm at any time during the study (best effort). This should be between 0.0 and 1.0 and a value of 0.0 means that there is no minimum and an algorithm proceeds without targeting any specific fraction. A value of 1.0 means that the algorithm attempts to only Suggest safe Trials. */ - deployedModelId?: string | null; + desiredMinSafeTrialsFraction?: number | null; /** - * If this field is provided, then the Endpoint's traffic_split will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. A DeployedModel will be successfully undeployed only if it doesn't have any traffic assigned to it when this method executes, or if this field unassigns any traffic to it. + * Safety threshold (boundary value between safe and unsafe). NOTE that if you leave SafetyMetricConfig unset, a default value of 0 will be used. */ - trafficSplit?: {[key: string]: number} | null; + safetyThreshold?: number | null; } /** - * Response message for EndpointService.UndeployModel. - */ - export interface Schema$GoogleCloudAiplatformV1UndeployModelResponse {} - /** - * Contains model information necessary to perform batch prediction without requiring a full model import. + * Represents a single parameter to optimize. 
*/ - export interface Schema$GoogleCloudAiplatformV1UnmanagedContainerModel { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpec { /** - * The path to the directory containing the Model artifact and any of its supporting files. + * The value spec for a 'CATEGORICAL' parameter. */ - artifactUri?: string | null; + categoricalValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecCategoricalValueSpec; /** - * Input only. The specification of the container that is to be used when deploying this Model. + * A conditional parameter node is active if the parameter's value matches the conditional node's parent_value_condition. If two items in conditional_parameter_specs have the same name, they must have disjoint parent_value_condition. */ - containerSpec?: Schema$GoogleCloudAiplatformV1ModelContainerSpec; + conditionalParameterSpecs?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec[]; /** - * Contains the schemata used in Model's predictions and explanations + * The value spec for a 'DISCRETE' parameter. */ - predictSchemata?: Schema$GoogleCloudAiplatformV1PredictSchemata; - } - /** - * Runtime operation information for UpdateDeploymentResourcePool method. - */ - export interface Schema$GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata { + discreteValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDiscreteValueSpec; /** - * The operation generic information. + * The value spec for a 'DOUBLE' parameter. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Runtime operation information for ModelService.UpdateExplanationDataset. - */ - export interface Schema$GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata { + doubleValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDoubleValueSpec; /** - * The common part of the operation metadata. + * The value spec for an 'INTEGER' parameter. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for ModelService.UpdateExplanationDataset. - */ - export interface Schema$GoogleCloudAiplatformV1UpdateExplanationDatasetRequest { + integerValueSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec; /** - * The example config containing the location of the dataset. + * Required. The ID of the parameter. Must not contain whitespaces and must be unique amongst all ParameterSpecs. */ - examples?: Schema$GoogleCloudAiplatformV1Examples; + parameterId?: string | null; + /** + * How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. + */ + scaleType?: string | null; } /** - * Response message of ModelService.UpdateExplanationDataset operation. - */ - export interface Schema$GoogleCloudAiplatformV1UpdateExplanationDatasetResponse {} - /** - * Details of operations that perform update FeatureGroup. + * Value specification for a parameter in `CATEGORICAL` type. */ - export interface Schema$GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecCategoricalValueSpec { /** - * Operation metadata for FeatureGroup. + * A default value for a `CATEGORICAL` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + defaultValue?: string | null; + /** + * Required. The list of possible categories. + */ + values?: string[] | null; } /** - * Details of operations that perform update FeatureOnlineStore. + * Represents a parameter spec with condition from its parent parameter. 
*/ - export interface Schema$GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec { /** - * Operation metadata for FeatureOnlineStore. + * Required. The spec for a conditional parameter. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + parameterSpec?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpec; + /** + * The spec for matching values from a parent parameter of `CATEGORICAL` type. + */ + parentCategoricalValues?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition; + /** + * The spec for matching values from a parent parameter of `DISCRETE` type. + */ + parentDiscreteValues?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition; + /** + * The spec for matching values from a parent parameter of `INTEGER` type. + */ + parentIntValues?: Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition; } /** - * Details of operations that perform update Feature. + * Represents the spec to match categorical values from parent parameter. */ - export interface Schema$GoogleCloudAiplatformV1UpdateFeatureOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition { /** - * Operation metadata for Feature Update. + * Required. Matches values of the parent parameter of 'CATEGORICAL' type. All values must exist in `categorical_value_spec` of parent parameter. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + values?: string[] | null; } /** - * Details of operations that perform update Featurestore. + * Represents the spec to match discrete values from parent parameter. 
*/ - export interface Schema$GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition { /** - * Operation metadata for Featurestore. + * Required. Matches values of the parent parameter of 'DISCRETE' type. All values must exist in `discrete_value_spec` of parent parameter. The Epsilon of the value matching is 1e-10. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + values?: number[] | null; } /** - * Details of operations that perform update FeatureView. + * Represents the spec to match integer values from parent parameter. */ - export interface Schema$GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { /** - * Operation metadata for FeatureView Update. + * Required. Matches values of the parent parameter of 'INTEGER' type. All values must lie in `integer_value_spec` of parent parameter. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + values?: string[] | null; } /** - * Runtime operation information for IndexService.UpdateIndex. + * Value specification for a parameter in `DISCRETE` type. */ - export interface Schema$GoogleCloudAiplatformV1UpdateIndexOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDiscreteValueSpec { /** - * The operation generic information. + * A default value for a `DISCRETE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. It automatically rounds to the nearest feasible discrete point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + defaultValue?: number | null; /** - * The operation metadata with regard to Matching Engine Index operation. + * Required. A list of possible values. The list should be in increasing order and at least 1e-10 apart. For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. This list should not contain more than 1,000 values. */ - nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; + values?: number[] | null; } /** - * Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob. + * Value specification for a parameter in `DOUBLE` type. */ - export interface Schema$GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecDoubleValueSpec { /** - * The operation generic information. + * A default value for a `DOUBLE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Details of operations that perform update PersistentResource. - */ - export interface Schema$GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata { + defaultValue?: number | null; /** - * Operation metadata for PersistentResource. + * Required. Inclusive maximum value of the parameter. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + maxValue?: number | null; /** - * Progress Message for Update LRO + * Required. Inclusive minimum value of the parameter. */ - progressMessage?: string | null; + minValue?: number | null; } /** - * Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool. 
+ * Value specification for a parameter in `INTEGER` type. */ - export interface Schema$GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec { /** - * The operation generic information. + * A default value for an `INTEGER` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + defaultValue?: string | null; /** - * Output only. The name of the SpecialistPool to which the specialists are being added. Format: `projects/{project_id\}/locations/{location_id\}/specialistPools/{specialist_pool\}` + * Required. Inclusive maximum value of the parameter. */ - specialistPool?: string | null; - } - /** - * Details of operations that perform update Tensorboard. - */ - export interface Schema$GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata { + maxValue?: string | null; /** - * Operation metadata for Tensorboard. + * Required. Inclusive minimum value of the parameter. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + minValue?: string | null; } /** - * Metadata information for NotebookService.UpgradeNotebookRuntime. + * The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. */ - export interface Schema$GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1StudySpecStudyStoppingConfig { /** - * The operation generic information. + * If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + maxDurationNoProgress?: string | null; /** - * A human-readable message that shows the intermediate progress details of NotebookRuntime. - */ - progressMessage?: string | null; - } - /** - * Request message for NotebookService.UpgradeNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest {} - /** - * Details of ModelService.UploadModel operation. - */ - export interface Schema$GoogleCloudAiplatformV1UploadModelOperationMetadata { + * If the specified time or duration has passed, stop the study. + */ + maximumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1StudyTimeConstraint; /** - * The common part of the operation metadata. + * If there are more than this many trials, stop the study. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; - } - /** - * Request message for ModelService.UploadModel. - */ - export interface Schema$GoogleCloudAiplatformV1UploadModelRequest { + maxNumTrials?: number | null; /** - * Required. The Model to create. + * If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. */ - model?: Schema$GoogleCloudAiplatformV1Model; + maxNumTrialsNoProgress?: number | null; /** - * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + * Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. 
For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. */ - modelId?: string | null; + minimumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1StudyTimeConstraint; /** - * Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + * If there are fewer than this many COMPLETED trials, do not stop the study. */ - parentModel?: string | null; + minNumTrials?: number | null; /** - * Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + * If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. 
The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). */ - serviceAccount?: string | null; + shouldStopAsap?: boolean | null; } /** - * Response message of ModelService.UploadModel operation. + * Time-based Constraint for Study */ - export interface Schema$GoogleCloudAiplatformV1UploadModelResponse { + export interface Schema$GoogleCloudAiplatformV1StudyTimeConstraint { /** - * The name of the uploaded Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Compares the wallclock time to this time. Must use UTC timezone. */ - model?: string | null; + endTime?: string | null; /** - * Output only. The version ID of the model that is uploaded. + * Counts the wallclock time passed since the creation of this Study. */ - modelVersionId?: string | null; + maxDuration?: string | null; } /** - * Request message for IndexService.UpsertDatapoints + * Details of operations that perform Trials suggestion. */ - export interface Schema$GoogleCloudAiplatformV1UpsertDatapointsRequest { + export interface Schema$GoogleCloudAiplatformV1SuggestTrialsMetadata { /** - * A list of datapoints to be created/updated. + * The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. */ - datapoints?: Schema$GoogleCloudAiplatformV1IndexDatapoint[]; + clientId?: string | null; /** - * Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts. 
+ * Operation metadata for suggesting Trials. */ - updateMask?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Response message for IndexService.UpsertDatapoints - */ - export interface Schema$GoogleCloudAiplatformV1UpsertDatapointsResponse {} - /** - * References an API call. It contains more information about long running operation and Jobs that are triggered by the API call. + * Request message for VizierService.SuggestTrials. */ - export interface Schema$GoogleCloudAiplatformV1UserActionReference { + export interface Schema$GoogleCloudAiplatformV1SuggestTrialsRequest { /** - * For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project\}/locations/{location\}/dataLabelingJobs/{data_labeling_job\}` + * Required. The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. */ - dataLabelingJob?: string | null; + clientId?: string | null; /** - * The method name of the API RPC call. For example, "/google.cloud.aiplatform.{apiVersion\}.DatasetService.CreateDataset" + * Optional. This allows you to specify the "context" for a Trial; a context is a slice (a subspace) of the search space. Typical uses for contexts: 1) You are using Vizier to tune a server for best performance, but there's a strong weekly cycle. The context specifies the day-of-week. This allows Tuesday to generalize from Wednesday without assuming that everything is identical. 2) Imagine you're optimizing some medical treatment for people. As they walk in the door, you know certain facts about them (e.g. sex, weight, height, blood-pressure). Put that information in the context, and Vizier will adapt its suggestions to the patient. 3) You want to do a fair A/B test efficiently. 
Specify the "A" and "B" conditions as contexts, and Vizier will generalize between "A" and "B" conditions. If they are similar, this will allow Vizier to converge to the optimum faster than if "A" and "B" were separate Studies. NOTE: You can also enter contexts as REQUESTED Trials, e.g. via the CreateTrial() RPC; that's the asynchronous option where you don't need a close association between contexts and suggestions. NOTE: All the Parameters you set in a context MUST be defined in the Study. NOTE: You must supply 0 or $suggestion_count contexts. If you don't supply any contexts, Vizier will make suggestions from the full search space specified in the StudySpec; if you supply a full set of context, each suggestion will match the corresponding context. NOTE: A Context with no features set matches anything, and allows suggestions from the full search space. NOTE: Contexts MUST lie within the search space specified in the StudySpec. It's an error if they don't. NOTE: Contexts preferentially match ACTIVE then REQUESTED trials before new suggestions are generated. NOTE: Generation of suggestions involves a match between a Context and (optionally) a REQUESTED trial; if that match is not fully specified, a suggestion will be generated in the merged subspace. */ - method?: string | null; + contexts?: Schema$GoogleCloudAiplatformV1TrialContext[]; /** - * For API calls that return a long running operation. Resource name of the long running operation. Format: `projects/{project\}/locations/{location\}/operations/{operation\}` + * Required. The number of suggestions requested. It must be positive. */ - operation?: string | null; + suggestionCount?: number | null; } /** - * Value is the value of the field. + * Response message for VizierService.SuggestTrials. */ - export interface Schema$GoogleCloudAiplatformV1Value { + export interface Schema$GoogleCloudAiplatformV1SuggestTrialsResponse { /** - * A double value. + * The time at which operation processing completed. 
*/ - doubleValue?: number | null; + endTime?: string | null; /** - * An integer value. + * The time at which the operation was started. */ - intValue?: string | null; + startTime?: string | null; /** - * A string value. + * The state of the Study. */ - stringValue?: string | null; - } - /** - * Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation - */ - export interface Schema$GoogleCloudAiplatformV1VertexAISearch { + studyState?: string | null; /** - * Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project\}/locations/{location\}/collections/{collection\}/dataStores/{dataStore\}` + * A list of Trials. */ - datastore?: string | null; + trials?: Schema$GoogleCloudAiplatformV1Trial[]; } /** - * Metadata describes the input video content. + * Hyperparameters for SFT. */ - export interface Schema$GoogleCloudAiplatformV1VideoMetadata { + export interface Schema$GoogleCloudAiplatformV1SupervisedHyperParameters { /** - * Optional. The end offset of the video. + * Optional. Adapter size for tuning. */ - endOffset?: string | null; + adapterSize?: string | null; /** - * Optional. The start offset of the video. + * Optional. Number of complete passes the model makes over the entire training dataset during training. */ - startOffset?: string | null; + epochCount?: string | null; + /** + * Optional. Multiplier for adjusting the default learning rate. + */ + learningRateMultiplier?: number | null; } /** - * Represents the spec of a worker pool in a job. + * Dataset distribution for Supervised Tuning. */ - export interface Schema$GoogleCloudAiplatformV1WorkerPoolSpec { + export interface Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution { /** - * The custom container task. + * Output only. Defines the histogram bucket. 
*/ - containerSpec?: Schema$GoogleCloudAiplatformV1ContainerSpec; + buckets?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistributionDatasetBucket[]; /** - * Disk spec. + * Output only. The maximum of the population values. */ - diskSpec?: Schema$GoogleCloudAiplatformV1DiskSpec; + max?: number | null; /** - * Optional. Immutable. The specification of a single machine. + * Output only. The arithmetic mean of the values in the population. */ - machineSpec?: Schema$GoogleCloudAiplatformV1MachineSpec; + mean?: number | null; /** - * Optional. List of NFS mount spec. + * Output only. The median of the values in the population. */ - nfsMounts?: Schema$GoogleCloudAiplatformV1NfsMount[]; + median?: number | null; /** - * The Python packaged task. + * Output only. The minimum of the population values. */ - pythonPackageSpec?: Schema$GoogleCloudAiplatformV1PythonPackageSpec; + min?: number | null; /** - * Optional. The number of worker replicas to use for this worker pool. + * Output only. The 5th percentile of the values in the population. */ - replicaCount?: string | null; - } - /** - * Contains Feature values to be written for a specific entity. - */ - export interface Schema$GoogleCloudAiplatformV1WriteFeatureValuesPayload { + p5?: number | null; /** - * Required. The ID of the entity. + * Output only. The 95th percentile of the values in the population. */ - entityId?: string | null; + p95?: number | null; /** - * Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future. + * Output only. Sum of a given population of values. */ - featureValues?: { - [key: string]: Schema$GoogleCloudAiplatformV1FeatureValue; - } | null; + sum?: string | null; } /** - * Request message for FeaturestoreOnlineServingService.WriteFeatureValues. 
+ * Dataset bucket used to create a histogram for the distribution given a population of values. */ - export interface Schema$GoogleCloudAiplatformV1WriteFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistributionDatasetBucket { /** - * Required. The entities to be written. Up to 100,000 feature values can be written across all `payloads`. + * Output only. Number of values in the bucket. */ - payloads?: Schema$GoogleCloudAiplatformV1WriteFeatureValuesPayload[]; - } - /** - * Response message for FeaturestoreOnlineServingService.WriteFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1WriteFeatureValuesResponse {} - /** - * Request message for TensorboardService.WriteTensorboardExperimentData. - */ - export interface Schema$GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest { + count?: number | null; /** - * Required. Requests containing per-run TensorboardTimeSeries data to write. + * Output only. Left bound of the bucket. */ - writeRunDataRequests?: Schema$GoogleCloudAiplatformV1WriteTensorboardRunDataRequest[]; + left?: number | null; + /** + * Output only. Right bound of the bucket. + */ + right?: number | null; } /** - * Response message for TensorboardService.WriteTensorboardExperimentData. - */ - export interface Schema$GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse {} - /** - * Request message for TensorboardService.WriteTensorboardRunData. + * Tuning data statistics for Supervised Tuning. */ - export interface Schema$GoogleCloudAiplatformV1WriteTensorboardRunDataRequest { + export interface Schema$GoogleCloudAiplatformV1SupervisedTuningDataStats { /** - * Required. The resource name of the TensorboardRun to write data to. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` + * Output only. Number of billable characters in the tuning dataset. 
*/ - tensorboardRun?: string | null; + totalBillableCharacterCount?: string | null; /** - * Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. The upper limit of data points per write request is 5000. + * Output only. Number of tuning characters in the tuning dataset. */ - timeSeriesData?: Schema$GoogleCloudAiplatformV1TimeSeriesData[]; - } - /** - * Response message for TensorboardService.WriteTensorboardRunData. - */ - export interface Schema$GoogleCloudAiplatformV1WriteTensorboardRunDataResponse {} - /** - * An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. - */ - export interface Schema$GoogleCloudAiplatformV1XraiAttribution { + totalTuningCharacterCount?: string | null; /** - * Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + * Output only. Number of examples in the tuning dataset. */ - blurBaselineConfig?: Schema$GoogleCloudAiplatformV1BlurBaselineConfig; + tuningDatasetExampleCount?: string | null; /** - * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + * Output only. Number of tuning steps for this Tuning Job. */ - smoothGradConfig?: Schema$GoogleCloudAiplatformV1SmoothGradConfig; + tuningStepCount?: string | null; /** - * Required. 
The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + * Output only. Sample user messages in the training dataset uri. */ - stepCount?: number | null; + userDatasetExamples?: Schema$GoogleCloudAiplatformV1Content[]; + /** + * Output only. Dataset distributions for the user input tokens. + */ + userInputTokenDistribution?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; + /** + * Output only. Dataset distributions for the messages per example. + */ + userMessagePerExampleDistribution?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; + /** + * Output only. Dataset distributions for the user output tokens. + */ + userOutputTokenDistribution?: Schema$GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; } /** - * The response message for Locations.ListLocations. + * Tuning Spec for Supervised Tuning. */ - export interface Schema$GoogleCloudLocationListLocationsResponse { + export interface Schema$GoogleCloudAiplatformV1SupervisedTuningSpec { /** - * A list of locations that matches the specified filter in the request. + * Optional. Hyperparameters for SFT. */ - locations?: Schema$GoogleCloudLocationLocation[]; + hyperParameters?: Schema$GoogleCloudAiplatformV1SupervisedHyperParameters; /** - * The standard List next-page token. + * Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. */ - nextPageToken?: string | null; + trainingDatasetUri?: string | null; + /** + * Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. + */ + validationDatasetUri?: string | null; } /** - * A resource that represents a Google Cloud location. + * Request message for FeatureOnlineStoreAdminService.SyncFeatureView. 
*/ - export interface Schema$GoogleCloudLocationLocation { + export interface Schema$GoogleCloudAiplatformV1SyncFeatureViewRequest {} + /** + * Response message for FeatureOnlineStoreAdminService.SyncFeatureView. + */ + export interface Schema$GoogleCloudAiplatformV1SyncFeatureViewResponse { /** - * The friendly name for this location, typically a nearby city name. For example, "Tokyo". + * Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}/featureViewSyncs/{feature_view_sync\}` */ - displayName?: string | null; + featureViewSync?: string | null; + } + /** + * A tensor value type. + */ + export interface Schema$GoogleCloudAiplatformV1Tensor { /** - * Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"\} + * Type specific representations that make it easy to create tensor protos in all languages. Only the representation corresponding to "dtype" can be set. The values hold the flattened representation of the tensor in row major order. BOOL */ - labels?: {[key: string]: string} | null; + boolVal?: boolean[] | null; /** - * The canonical id for this location. For example: `"us-east1"`. + * STRING */ - locationId?: string | null; + bytesVal?: string[] | null; /** - * Service-specific metadata. For example the available capacity at the given location. + * DOUBLE */ - metadata?: {[key: string]: any} | null; + doubleVal?: number[] | null; /** - * Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"` + * The data type of tensor. */ - name?: string | null; - } - /** - * Associates `members`, or principals, with a `role`. - */ - export interface Schema$GoogleIamV1Binding { + dtype?: string | null; /** - * The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. 
If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + * FLOAT */ - condition?: Schema$GoogleTypeExpr; + floatVal?: number[] | null; /** - * Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid\}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid\}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid\}.svc.id.goog[{namespace\}/{kubernetes-sa\}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid\}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain\}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workforce identity pool. 
* `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/group/{group_id\}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/x`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/group/{group_id\}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/x`: All identities in a workload identity pool. * `deleted:user:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid\}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid\}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid\}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`. + * INT64 */ - members?: string[] | null; + int64Val?: string[] | null; /** - * Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles). + * INT_8 INT_16 INT_32 */ - role?: string | null; - } - /** - * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] \}, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", \} \} ], "etag": "BwWWja0YfJA=", "version": 3 \} ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). - */ - export interface Schema$GoogleIamV1Policy { + intVal?: number[] | null; /** - * Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`. 
+ * A list of tensor values. */ - bindings?: Schema$GoogleIamV1Binding[]; + listVal?: Schema$GoogleCloudAiplatformV1Tensor[]; /** - * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. + * Shape of the tensor. */ - etag?: string | null; + shape?: string[] | null; /** - * Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + * STRING */ - version?: number | null; - } - /** - * Request message for `SetIamPolicy` method. - */ - export interface Schema$GoogleIamV1SetIamPolicyRequest { + stringVal?: string[] | null; /** - * REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them. + * A map of string to tensor. */ - policy?: Schema$GoogleIamV1Policy; - } - /** - * Response message for `TestIamPermissions` method. - */ - export interface Schema$GoogleIamV1TestIamPermissionsResponse { + structVal?: {[key: string]: Schema$GoogleCloudAiplatformV1Tensor} | null; /** - * A subset of `TestPermissionsRequest.permissions` that the caller is allowed. + * Serialized raw tensor content. */ - permissions?: string[] | null; - } - /** - * The response message for Operations.ListOperations. - */ - export interface Schema$GoogleLongrunningListOperationsResponse { + tensorVal?: string | null; /** - * The standard List next-page token. + * UINT64 */ - nextPageToken?: string | null; + uint64Val?: string[] | null; /** - * A list of operations that matches the specified filter in the request. + * UINT8 UINT16 UINT32 */ - operations?: Schema$GoogleLongrunningOperation[]; + uintVal?: number[] | null; } /** - * This resource represents a long-running operation that is the result of a network API call. + * Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a Google Cloud project. If needed users can also create extra Tensorboards in their projects. */ - export interface Schema$GoogleLongrunningOperation { - /** - * If the value is `false`, it means the operation is still in progress. 
If `true`, the operation is completed, and either `error` or `response` is available. - */ - done?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1Tensorboard { /** - * The error result of the operation in case of failure or cancellation. + * Output only. Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. Does not end with a '/'. */ - error?: Schema$GoogleRpcStatus; + blobStoragePathPrefix?: string | null; /** - * Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + * Output only. Timestamp when this Tensorboard was created. */ - metadata?: {[key: string]: any} | null; + createTime?: string | null; /** - * The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id\}`. + * Description of this Tensorboard. */ - name?: string | null; + description?: string | null; /** - * The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + * Required. User provided name of this Tensorboard. */ - response?: {[key: string]: any} | null; - } - /** - * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. 
A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \} - */ - export interface Schema$GoogleProtobufEmpty {} - /** - * The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). - */ - export interface Schema$GoogleRpcStatus { + displayName?: string | null; /** - * The status code, which should be an enum value of google.rpc.Code. + * Customer-managed encryption key spec for a Tensorboard. If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key. */ - code?: number | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * A list of messages that carry the error details. There is a common set of message types for APIs to use. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - details?: Array<{[key: string]: any}> | null; + etag?: string | null; /** - * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + * Used to indicate if the TensorBoard instance is the default one. Each project & region can have at most one default TensorBoard instance. Creation of a default TensorBoard instance and updating an existing TensorBoard instance to be default will mark all other TensorBoard instances (if any) as non default. */ - message?: string | null; - } - /** - * Represents a color in the RGBA color space. 
This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); \} public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); \} return resultBuilder.build(); \} // ... Example (iOS / Obj-C): // ... 
static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; \} return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; \} static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; \} Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; \} [result autorelease]; return result; \} // ... Example (JavaScript): // ... var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); \} var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); \}; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); \} resultBuilder.push(hexString); return resultBuilder.join(''); \}; // ... - */ - export interface Schema$GoogleTypeColor { + isDefault?: boolean | null; /** - * The fraction of this color that should be applied to the pixel. 
That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). + * The labels with user-defined metadata to organize your Tensorboards. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Tensorboard (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - alpha?: number | null; + labels?: {[key: string]: string} | null; /** - * The amount of blue in the color as a value in the interval [0, 1]. + * Output only. Name of the Tensorboard. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` */ - blue?: number | null; + name?: string | null; /** - * The amount of green in the color as a value in the interval [0, 1]. + * Output only. The number of Runs stored in this Tensorboard. */ - green?: number | null; + runCount?: number | null; /** - * The amount of red in the color as a value in the interval [0, 1]. + * Output only. Timestamp when this Tensorboard was last updated. */ - red?: number | null; + updateTime?: string | null; } /** - * Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. 
This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp + * One blob (e.g, image, graph) viewable on a blob metric plot. */ - export interface Schema$GoogleTypeDate { + export interface Schema$GoogleCloudAiplatformV1TensorboardBlob { /** - * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. + * Optional. The bytes of the blob is not present unless it's returned by the ReadTensorboardBlobData endpoint. */ - day?: number | null; + data?: string | null; /** - * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. + * Output only. A URI safe key uniquely identifying a blob. Can be used to locate the blob stored in the Cloud Storage bucket of the consumer project. */ - month?: number | null; + id?: string | null; + } + /** + * One point viewable on a blob metric plot, but mostly just a wrapper message to work around repeated fields can't be used directly within `oneof` fields. + */ + export interface Schema$GoogleCloudAiplatformV1TensorboardBlobSequence { /** - * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. + * List of blobs contained within the sequence. */ - year?: number | null; + values?: Schema$GoogleCloudAiplatformV1TensorboardBlob[]; } /** - * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + * A TensorboardExperiment is a group of TensorboardRuns, that are typically the results of a training job run, in a Tensorboard. */ - export interface Schema$GoogleTypeExpr { + export interface Schema$GoogleCloudAiplatformV1TensorboardExperiment { /** - * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + * Output only. Timestamp when this TensorboardExperiment was created. + */ + createTime?: string | null; + /** + * Description of this TensorboardExperiment. */ description?: string | null; /** - * Textual representation of an expression in Common Expression Language syntax. + * User provided name of this TensorboardExperiment. */ - expression?: string | null; + displayName?: string | null; /** - * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - location?: string | null; + etag?: string | null; /** - * Optional. 
Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + * The labels with user-defined metadata to organize your TensorboardExperiment. Label keys and values cannot be longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with `aiplatform.googleapis.com/` and are immutable. The following system labels exist for each Dataset: * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value is the metadata_schema's title. */ - title?: string | null; - } - /** - * Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time. - */ - export interface Schema$GoogleTypeInterval { + labels?: {[key: string]: string} | null; /** - * Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end. + * Output only. Name of the TensorboardExperiment. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` */ - endTime?: string | null; + name?: string | null; /** - * Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start. + * Immutable. Source of the TensorboardExperiment. Example: a custom training job. */ - startTime?: string | null; + source?: string | null; + /** + * Output only. Timestamp when this TensorboardExperiment was last updated. 
+ */ + updateTime?: string | null; } /** - * Represents an amount of money with its currency type. + * TensorboardRun maps to a specific execution of a training job with a given set of hyperparameter values, model definition, dataset, etc */ - export interface Schema$GoogleTypeMoney { + export interface Schema$GoogleCloudAiplatformV1TensorboardRun { /** - * The three-letter currency code defined in ISO 4217. + * Output only. Timestamp when this TensorboardRun was created. */ - currencyCode?: string | null; + createTime?: string | null; /** - * Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. + * Description of this TensorboardRun. */ - nanos?: number | null; + description?: string | null; /** - * The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. + * Required. User provided name of this TensorboardRun. This value must be unique among all TensorboardRuns belonging to the same parent TensorboardExperiment. */ - units?: string | null; - } - export interface Schema$IntelligenceCloudAutomlXpsMetricEntry { + displayName?: string | null; /** - * For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - argentumMetricId?: string | null; + etag?: string | null; /** - * A double value. + * The labels with user-defined metadata to organize your TensorboardRuns. This field will be used to filter and visualize Runs in the Tensorboard UI. 
For example, a Vertex AI training job can set a label aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created within that job. An end user can set a label experiment_id=xxxxx for all the runs produced in a Jupyter notebook. These runs can be grouped by a label value and visualized together in the Tensorboard UI. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one TensorboardRun (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - doubleValue?: number | null; + labels?: {[key: string]: string} | null; /** - * A signed 64-bit integer value. + * Output only. Name of the TensorboardRun. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` */ - int64Value?: string | null; + name?: string | null; /** - * The metric name defined in the service configuration. + * Output only. Timestamp when this TensorboardRun was last updated. */ - metricName?: string | null; + updateTime?: string | null; + } + /** + * One point viewable on a tensor metric plot. + */ + export interface Schema$GoogleCloudAiplatformV1TensorboardTensor { /** - * Billing system labels for this (metric, value) pair. + * Required. Serialized form of https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto */ - systemLabels?: Schema$IntelligenceCloudAutomlXpsMetricEntryLabel[]; - } - export interface Schema$IntelligenceCloudAutomlXpsMetricEntryLabel { + value?: string | null; /** - * The name of the label. + * Optional. Version number of TensorProto used to serialize value. 
*/ - labelName?: string | null; + versionNumber?: number | null; + } + /** + * TensorboardTimeSeries maps to times series produced in training runs + */ + export interface Schema$GoogleCloudAiplatformV1TensorboardTimeSeries { /** - * The value of the label. + * Output only. Timestamp when this TensorboardTimeSeries was created. */ - labelValue?: string | null; - } - export interface Schema$IntelligenceCloudAutomlXpsReportingMetrics { + createTime?: string | null; /** - * The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. + * Description of this TensorboardTimeSeries. */ - effectiveTrainingDuration?: string | null; + description?: string | null; /** - * One entry per metric name. The values must be aggregated per metric name. + * Required. User provided name of this TensorboardTimeSeries. This value should be unique among all TensorboardTimeSeries resources belonging to the same TensorboardRun resource (parent resource). */ - metricEntries?: Schema$IntelligenceCloudAutomlXpsMetricEntry[]; - } - /** - * The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30 - */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoDocAttribution { - amarnaId?: string | null; - arxivId?: string | null; - author?: string | null; - bibkey?: string | null; + displayName?: string | null; /** - * ID of the paper in bioarxiv like ddoi.org/{biorxiv_id\} eg: https://doi.org/10.1101/343517 + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
*/ - biorxivId?: string | null; - bookTitle?: string | null; + etag?: string | null; /** - * The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. + * Output only. Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries. */ - bookVolumeId?: string | null; - category?: string | null; - conversationId?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1TensorboardTimeSeriesMetadata; /** - * The dataset this document comes from. + * Output only. Name of the TensorboardTimeSeries. */ - dataset?: string | null; - filepath?: string | null; - geminiId?: string | null; - gnewsArticleTitle?: string | null; - goodallExampleId?: string | null; + name?: string | null; /** - * Whether the document is opted out. + * Data of the current plugin, with the size limited to 65KB. */ - isOptOut?: boolean | null; - isPrompt?: boolean | null; - lamdaExampleId?: string | null; - license?: string | null; - meenaConversationId?: string | null; + pluginData?: string | null; /** - * Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. + * Immutable. Name of the plugin this time series pertain to. Such as Scalar, Tensor, Blob */ - naturalLanguageCode?: string | null; + pluginName?: string | null; /** - * True if this doc has no attribution information available. 
We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. + * Output only. Timestamp when this TensorboardTimeSeries was last updated. */ - noAttribution?: boolean | null; - podcastUtteranceId?: string | null; - publicationDate?: Schema$GoogleTypeDate; + updateTime?: string | null; /** - * This field is for opt-out experiment only, MUST never be used during actual production/serving. + * Required. Immutable. Type of TensorboardTimeSeries value. */ - qualityScoreExperimentOnly?: number | null; + valueType?: string | null; + } + /** + * Describes metadata for a TensorboardTimeSeries. + */ + export interface Schema$GoogleCloudAiplatformV1TensorboardTimeSeriesMetadata { /** - * Github repository + * Output only. The largest blob sequence length (number of blobs) of all data points in this time series, if its ValueType is BLOB_SEQUENCE. */ - repo?: string | null; + maxBlobSequenceLength?: string | null; /** - * URL of a webdoc + * Output only. Max step index of all data points within a TensorboardTimeSeries. */ - url?: string | null; - volumeId?: string | null; + maxStep?: string | null; /** - * Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. + * Output only. Max wall clock timestamp of all data points within a TensorboardTimeSeries. */ - wikipediaArticleTitle?: string | null; + maxWallTime?: string | null; + } + /** + * The storage details for TFRecord output content. + */ + export interface Schema$GoogleCloudAiplatformV1TFRecordDestination { /** - * The unique video id from Youtube. Example: AkoGsW52Ir0 + * Required. Google Cloud Storage location. 
*/ - youtubeVideoId?: string | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1GcsDestination; } /** - * The recitation result for one input + * The config for feature monitoring threshold. */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoRecitationResult { - dynamicSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + export interface Schema$GoogleCloudAiplatformV1ThresholdConfig { /** - * The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK \> CITE \> NO_ACTION. When the given input is not found in any source, the recitation action will not be specified. + * Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. */ - recitationAction?: string | null; - trainingSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + value?: number | null; } /** - * The recitation result for each segment in a given input. + * All the data stored in a TensorboardTimeSeries. */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult { + export interface Schema$GoogleCloudAiplatformV1TimeSeriesData { /** - * The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. + * Required. 
The ID of the TensorboardTimeSeries, which will become the final component of the TensorboardTimeSeries' resource name */ - attributionDataset?: string | null; + tensorboardTimeSeriesId?: string | null; /** - * human-friendly string that contains information from doc_attribution which could be shown by clients + * Required. Data points in this time series. */ - displayAttributionMessage?: string | null; - docAttribution?: Schema$LanguageLabsAidaTrustRecitationProtoDocAttribution; + values?: Schema$GoogleCloudAiplatformV1TimeSeriesDataPoint[]; /** - * number of documents that contained this segment + * Required. Immutable. The value type of this time series. All the values in this time series data must match this value type. */ - docOccurrences?: number | null; - endIndex?: number | null; + valueType?: string | null; + } + /** + * A TensorboardTimeSeries data point. + */ + export interface Schema$GoogleCloudAiplatformV1TimeSeriesDataPoint { /** - * The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. + * A blob sequence value. */ - rawText?: string | null; - segmentRecitationAction?: string | null; + blobs?: Schema$GoogleCloudAiplatformV1TensorboardBlobSequence; /** - * The category of the source dataset where the segment came from. This is more stable than Dataset. + * A scalar value. */ - sourceCategory?: string | null; + scalar?: Schema$GoogleCloudAiplatformV1Scalar; /** - * The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. + * Step index of this data point within the run. */ - startIndex?: number | null; + step?: string | null; + /** + * A tensor value. 
+ */ + tensor?: Schema$GoogleCloudAiplatformV1TensorboardTensor; + /** + * Wall clock timestamp when this data point is generated by the end user. + */ + wallTime?: string | null; } /** - * The recitation result for one stream input + * Assigns input data to training, validation, and test sets based on a provided timestamps. The youngest data pieces are assigned to training set, next to validation set, and the oldest to the test set. Supported only for tabular Datasets. */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoStreamRecitationResult { + export interface Schema$GoogleCloudAiplatformV1TimestampSplit { /** - * The recitation result against the given dynamic data source. + * Required. The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. */ - dynamicSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + key?: string | null; /** - * Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation. + * The fraction of the input data that is to be used to evaluate the Model. */ - fullyCheckedTextIndex?: number | null; + testFraction?: number | null; /** - * The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK \> CITE \> NO_ACTION. + * The fraction of the input data that is to be used to train the Model. */ - recitationAction?: string | null; + trainingFraction?: number | null; /** - * The recitation result against model training data. + * The fraction of the input data that is to be used to validate the Model. 
*/ - trainingSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + validationFraction?: number | null; } /** - * Recitation check result for a single content chunk. + * Tokens info with a list of tokens and the corresponding list of token ids. */ - export interface Schema$LearningGenaiRecitationContentChunkRecitationCheckResult { - imageResult?: Schema$LearningGenaiRecitationImageRecitationCheckResult; - textResult?: Schema$LearningGenaiRecitationRecitationResult; + export interface Schema$GoogleCloudAiplatformV1TokensInfo { + /** + * A list of token ids from the input. + */ + tokenIds?: string[] | null; + /** + * A list of tokens from the input. + */ + tokens?: string[] | null; } /** - * The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30 + * Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). */ - export interface Schema$LearningGenaiRecitationDocAttribution { - amarnaId?: string | null; - arxivId?: string | null; - author?: string | null; - bibkey?: string | null; - /** - * ID of the paper in bioarxiv like ddoi.org/{biorxiv_id\} eg: https://doi.org/10.1101/343517 - */ - biorxivId?: string | null; - bookTitle?: string | null; + export interface Schema$GoogleCloudAiplatformV1Tool { /** - * The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. 
Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. + * Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. */ - bookVolumeId?: string | null; - conversationId?: string | null; + functionDeclarations?: Schema$GoogleCloudAiplatformV1FunctionDeclaration[]; /** - * The dataset this document comes from. + * Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. */ - dataset?: string | null; - filepath?: string | null; - geminiId?: string | null; - gnewsArticleTitle?: string | null; - goodallExampleId?: string | null; + retrieval?: Schema$GoogleCloudAiplatformV1Retrieval; + } + /** + * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. + */ + export interface Schema$GoogleCloudAiplatformV1TrainingConfig { /** - * Whether the document is opted out. + * The timeout hours for the CMLE training job, expressed in milli hours i.e. 1,000 value in this field means 1 hour. */ - isOptOut?: boolean | null; + timeoutTrainingMilliHours?: string | null; + } + /** + * The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, upload the Model to Vertex AI, and evaluate the Model. 
+ */ + export interface Schema$GoogleCloudAiplatformV1TrainingPipeline { /** - * When true, this attribution came from the user's prompt. + * Output only. Time when the TrainingPipeline was created. */ - isPrompt?: boolean | null; - lamdaExampleId?: string | null; - license?: string | null; - meenaConversationId?: string | null; + createTime?: string | null; /** - * Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. + * Required. The user-defined name of this TrainingPipeline. */ - naturalLanguageCode?: string | null; + displayName?: string | null; /** - * True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. + * Customer-managed encryption key spec for a TrainingPipeline. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if model_to_upload is not set separately. */ - noAttribution?: boolean | null; - podcastUtteranceId?: string | null; - publicationDate?: Schema$GoogleTypeDate; + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; /** - * This field is for opt-out experiment only, MUST never be used during actual production/serving. + * Output only. Time when the TrainingPipeline entered any of the following states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, `PIPELINE_STATE_CANCELLED`. */ - qualityScoreExperimentOnly?: number | null; + endTime?: string | null; /** - * Github repository + * Output only. 
Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or `PIPELINE_STATE_CANCELLED`. */ - repo?: string | null; + error?: Schema$GoogleRpcStatus; /** - * URL of a webdoc + * Specifies Vertex AI owned input data that may be used for training the Model. The TrainingPipeline's training_task_definition should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the training_task_definition, then it should be assumed that the TrainingPipeline does not depend on this configuration. */ - url?: string | null; - volumeId?: string | null; + inputDataConfig?: Schema$GoogleCloudAiplatformV1InputDataConfig; /** - * Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. + * The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - wikipediaArticleTitle?: string | null; - youtubeVideoId?: string | null; - } - /** - * Attribution information about the recited image. - */ - export interface Schema$LearningGenaiRecitationImageDocAttribution { + labels?: {[key: string]: string} | null; /** - * Unique ID of the image. + * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. */ - datasetName?: string | null; + modelId?: string | null; /** - * Doc ID to identify the image. 
These could be urls of images or amarna id. + * Describes the Model that may be uploaded (via ModelService.UploadModel) by this TrainingPipeline. The TrainingPipeline's training_task_definition should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the training_task_definition, then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task does not support uploading a Model as part of the pipeline. When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and the trained Model had been uploaded into Vertex AI, then the model_to_upload's resource name is populated. The Model is always uploaded into the Project and Location in which this pipeline is. */ - stringDocids?: string | null; - } - export interface Schema$LearningGenaiRecitationImageRecitationCheckResult { + modelToUpload?: Schema$GoogleCloudAiplatformV1Model; /** - * Only has NO_ACTION or BLOCK to start with. + * Output only. Resource name of the TrainingPipeline. */ - recitationAction?: string | null; + name?: string | null; /** - * Images that are similar to the requested image. + * Optional. When specify this field, the `model_to_upload` will not be uploaded as a new model, instead, it will become a new version of this `parent_model`. */ - recitedImages?: Schema$LearningGenaiRecitationImageRecitationCheckResultSimilarImage[]; - } - export interface Schema$LearningGenaiRecitationImageRecitationCheckResultSimilarImage { + parentModel?: string | null; /** - * Attribution information about the image + * Output only. Time when the TrainingPipeline for the first time entered the `PIPELINE_STATE_RUNNING` state. 
*/ - docAttribution?: Schema$LearningGenaiRecitationImageDocAttribution; + startTime?: string | null; /** - * The memorization embedding model that returned this image + * Output only. The detailed state of the pipeline. */ - embeddingModel?: string | null; + state?: string | null; /** - * Image ID corresponding of the image corresponding to the score. `image_id` serves for debugging purposes and can't be used by clients to retrieve an image. + * Required. A Google Cloud Storage path to the YAML file that defines the training task which is responsible for producing the model artifact, and may also include additional auxiliary work. The definition files that can be used here are found in gs://google-cloud-aiplatform/schema/trainingjob/definition/. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - imageId?: string | null; + trainingTaskDefinition?: string | null; /** - * Similarity score of requested image compared with image in training data. + * Required. The training task's parameter(s), as specified in the training_task_definition's `inputs`. */ - scores?: number | null; - } - /** - * Recitation check result for a stream of content chunks (e.g. a model response). - */ - export interface Schema$LearningGenaiRecitationMMRecitationCheckResult { - chunkResults?: Schema$LearningGenaiRecitationContentChunkRecitationCheckResult[]; + trainingTaskInputs?: any | null; /** - * Overall recommended recitation action for the content. + * Output only. The metadata information as specified in the training_task_definition's `metadata`. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's training_task_definition contains `metadata` object. 
*/ - recitationAction?: string | null; - } - /** - * The recitation result for one input - */ - export interface Schema$LearningGenaiRecitationRecitationResult { - dynamicSegmentResults?: Schema$LearningGenaiRecitationSegmentResult[]; + trainingTaskMetadata?: any | null; /** - * The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK \> CITE \> NO_ACTION. When the given input is not found in any source, the recitation action will be NO_ACTION. + * Output only. Time when the TrainingPipeline was most recently updated. */ - recitationAction?: string | null; - trainingSegmentResults?: Schema$LearningGenaiRecitationSegmentResult[]; + updateTime?: string | null; } /** - * The recitation result for each segment in a given input. + * A message representing a Trial. A Trial contains a unique set of Parameters that has been or will be evaluated, along with the objective metrics got by running the Trial. */ - export interface Schema$LearningGenaiRecitationSegmentResult { + export interface Schema$GoogleCloudAiplatformV1Trial { /** - * The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. + * Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique client_id. When a client asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client should evaluate the Trial, complete it, and report back to Vertex AI Vizier. If suggestion is asked again by same client_id before the Trial is completed, the same Trial will be returned. Multiple clients with different client_ids can ask for suggestions simultaneously, each of them will get their own Trial. 
*/ - attributionDataset?: string | null; + clientId?: string | null; /** - * human-friendly string that contains information from doc_attribution which could be shown by clients + * Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. */ - displayAttributionMessage?: string | null; - docAttribution?: Schema$LearningGenaiRecitationDocAttribution; + customJob?: string | null; /** - * number of documents that contained this segment + * Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`. */ - docOccurrences?: number | null; - endIndex?: number | null; + endTime?: string | null; /** - * The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. + * Output only. The final measurement containing the objective value. */ - rawText?: string | null; - segmentRecitationAction?: string | null; + finalMeasurement?: Schema$GoogleCloudAiplatformV1Measurement; /** - * The category of the source dataset where the segment came from. This is more stable than Dataset. + * Output only. The identifier of the Trial assigned by the service. */ - sourceCategory?: string | null; + id?: string | null; /** - * The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. + * Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is `INFEASIBLE`. */ - startIndex?: number | null; - } - /** - * The type used for final weights calculation. 
- */ - export interface Schema$LearningGenaiRootCalculationType { - scoreType?: string | null; - weights?: number | null; - } - export interface Schema$LearningGenaiRootClassifierOutput { + infeasibleReason?: string | null; /** - * If set, this is the output of the first matching rule. + * Output only. A list of measurements that are strictly lexicographically ordered by their induced tuples (steps, elapsed_duration). These are used for early stopping computations. */ - ruleOutput?: Schema$LearningGenaiRootRuleOutput; + measurements?: Schema$GoogleCloudAiplatformV1Measurement[]; /** - * outputs of all matching rule. + * Output only. Resource name of the Trial assigned by the service. */ - ruleOutputs?: Schema$LearningGenaiRootRuleOutput[]; + name?: string | null; /** - * The results of data_providers and metrics. + * Output only. The parameters of the Trial. */ - state?: Schema$LearningGenaiRootClassifierState; - } - export interface Schema$LearningGenaiRootClassifierOutputSummary { - metrics?: Schema$LearningGenaiRootMetricOutput[]; + parameters?: Schema$GoogleCloudAiplatformV1TrialParameter[]; /** - * Output of the first matching rule. + * Output only. Time when the Trial was started. */ - ruleOutput?: Schema$LearningGenaiRootRuleOutput; + startTime?: string | null; /** - * outputs of all matching rule. + * Output only. The detailed state of the Trial. */ - ruleOutputs?: Schema$LearningGenaiRootRuleOutput[]; - } - /** - * DataProviderOutput and MetricOutput can be saved between calls to the Classifier framework. For instance, you can run the query classifier, get outputs from those metrics, then use them in a result classifier as well. Example rule based on this idea: and_rules { rule { metric_name: 'query_safesearch_v2' ... \} rule { metric_name: 'response_safesearch_v2' ... 
\} \} - */ - export interface Schema$LearningGenaiRootClassifierState { - dataProviderOutput?: Schema$LearningGenaiRootDataProviderOutput[]; - metricOutput?: Schema$LearningGenaiRootMetricOutput[]; - } - /** - * Stores all metadata relating to AIDA DoConversation. - */ - export interface Schema$LearningGenaiRootCodeyChatMetadata { + state?: string | null; /** - * Indicates the programming language of the code if the message is a code chunk. + * Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if this trial is part of a HyperparameterTuningJob and the job's trial_job_spec.enable_web_access field is `true`. The keys are names of each node used for the trial; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. */ - codeLanguage?: string | null; + webAccessUris?: {[key: string]: string} | null; } /** - * Describes a sample at a checkpoint for post-processing. + * Next ID: 3 */ - export interface Schema$LearningGenaiRootCodeyCheckpoint { - /** - * Metadata that describes what was truncated at this checkpoint. - */ - codeyTruncatorMetadata?: Schema$LearningGenaiRootCodeyTruncatorMetadata; + export interface Schema$GoogleCloudAiplatformV1TrialContext { /** - * Current state of the sample after truncator. + * A human-readable field which can store a description of this context. This will become part of the resulting Trial's description field. */ - currentSample?: string | null; + description?: string | null; /** - * Postprocessor run that yielded this checkpoint. + * If/when a Trial is generated or selected from this Context, its Parameters will match any parameters specified here. (I.e. 
if this context specifies parameter name:'a' int_value:3, then a resulting Trial will have int_value:3 for its parameter named 'a'.) Note that we first attempt to match existing REQUESTED Trials with contexts, and if there are no matches, we generate suggestions in the subspace defined by the parameters specified here. NOTE: a Context without any Parameters matches the entire feasible search space. */ - postInferenceStep?: string | null; - } - /** - * Stores all metadata relating to Completion. - */ - export interface Schema$LearningGenaiRootCodeyCompletionMetadata { - checkpoints?: Schema$LearningGenaiRootCodeyCheckpoint[]; + parameters?: Schema$GoogleCloudAiplatformV1TrialParameter[]; } /** - * Stores all metadata relating to GenerateCode. + * A message representing a parameter to be tuned. */ - export interface Schema$LearningGenaiRootCodeyGenerationMetadata { + export interface Schema$GoogleCloudAiplatformV1TrialParameter { /** - * Last state of the sample before getting dropped/returned. + * Output only. The ID of the parameter. The parameter should be defined in StudySpec's Parameters. */ - output?: string | null; + parameterId?: string | null; /** - * Last Codey postprocessing step for this sample before getting dropped/returned. + * Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'. */ - postInferenceStep?: string | null; - } - /** - * Top-level wrapper used to store all things codey-related. - */ - export interface Schema$LearningGenaiRootCodeyOutput { - codeyChatMetadata?: Schema$LearningGenaiRootCodeyChatMetadata; - codeyCompletionMetadata?: Schema$LearningGenaiRootCodeyCompletionMetadata; - codeyGenerationMetadata?: Schema$LearningGenaiRootCodeyGenerationMetadata; + value?: any | null; } /** - * Metadata describing what was truncated at each checkpoint. 
+ * The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. */ - export interface Schema$LearningGenaiRootCodeyTruncatorMetadata { + export interface Schema$GoogleCloudAiplatformV1TunedModel { /** - * Index of the current sample that trims off truncated text. + * Output only. A resource name of an Endpoint. Format: `projects/{project\}/locations/{location\}/endpoints/{endpoint\}`. */ - cutoffIndex?: number | null; + endpoint?: string | null; /** - * Text that was truncated at a specific checkpoint. + * Output only. The resource name of the TunedModel. Format: `projects/{project\}/locations/{location\}/models/{model\}`. */ - truncatedText?: string | null; + model?: string | null; } /** - * Score threshold for a category. + * The tuning data statistic values for TuningJob. */ - export interface Schema$LearningGenaiRootControlDecodingConfigThreshold { - policy?: string | null; - scoreMax?: number | null; + export interface Schema$GoogleCloudAiplatformV1TuningDataStats { + /** + * The SFT Tuning data stats. + */ + supervisedTuningDataStats?: Schema$GoogleCloudAiplatformV1SupervisedTuningDataStats; } /** - * Holds one control decoding record. + * Represents a TuningJob that runs with Google owned models. */ - export interface Schema$LearningGenaiRootControlDecodingRecord { + export interface Schema$GoogleCloudAiplatformV1TuningJob { /** - * Prefixes feeded into scorer. + * The base model that is being tuned, e.g., "gemini-1.0-pro-002". */ - prefixes?: string | null; + baseModel?: string | null; /** - * Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`. + * Output only. Time when the TuningJob was created. */ - scores?: Schema$LearningGenaiRootControlDecodingRecordPolicyScore[]; + createTime?: string | null; /** - * Suffixes feeded into scorer. + * Optional. The description of the TuningJob. 
*/ - suffiexes?: string | null; + description?: string | null; /** - * Per policy thresholds from user config. + * Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key. */ - thresholds?: Schema$LearningGenaiRootControlDecodingConfigThreshold[]; - } - export interface Schema$LearningGenaiRootControlDecodingRecordPolicyScore { - policy?: string | null; - score?: number | null; - } - export interface Schema$LearningGenaiRootControlDecodingRecords { + encryptionSpec?: Schema$GoogleCloudAiplatformV1EncryptionSpec; + /** + * Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. + */ + endTime?: string | null; /** - * One ControlDecodingRecord record maps to one rewind. + * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */ - records?: Schema$LearningGenaiRootControlDecodingRecord[]; - } - export interface Schema$LearningGenaiRootDataProviderOutput { - name?: string | null; + error?: Schema$GoogleRpcStatus; /** - * If set, this DataProvider failed and this is the error message. + * Output only. The Experiment associated with this TuningJob. */ - status?: Schema$UtilStatusProto; - } - export interface Schema$LearningGenaiRootFilterMetadata { + experiment?: string | null; /** - * Filter confidence. + * Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - confidence?: string | null; + labels?: {[key: string]: string} | null; /** - * Debug info for the message. 
+ * Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project\}/locations/{location\}/tuningJobs/{tuning_job\}` */ - debugInfo?: Schema$LearningGenaiRootFilterMetadataFilterDebugInfo; + name?: string | null; /** - * A fallback message chosen by the applied filter. + * Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state. */ - fallback?: string | null; + startTime?: string | null; /** - * Additional info for the filter. + * Output only. The detailed state of the job. */ - info?: string | null; + state?: string | null; /** - * Name of the filter that triggered. + * Tuning Spec for Supervised Fine Tuning. */ - name?: string | null; + supervisedTuningSpec?: Schema$GoogleCloudAiplatformV1SupervisedTuningSpec; /** - * Filter reason. + * Output only. The tuned model resources assiociated with this TuningJob. */ - reason?: string | null; + tunedModel?: Schema$GoogleCloudAiplatformV1TunedModel; /** - * The input query or generated response that is getting filtered. + * Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - text?: string | null; - } - export interface Schema$LearningGenaiRootFilterMetadataFilterDebugInfo { - classifierOutput?: Schema$LearningGenaiRootClassifierOutput; - defaultMetadata?: string | null; - languageFilterResult?: Schema$LearningGenaiRootLanguageFilterResult; + tunedModelDisplayName?: string | null; /** - * Safety filter output information for LLM Root RAI harm check. + * Output only. The tuning data statistics associated with this TuningJob. */ - raiOutput?: Schema$LearningGenaiRootRAIOutput; - raiResult?: Schema$CloudAiNlLlmProtoServiceRaiResult; - raiSignal?: Schema$CloudAiNlLlmProtoServiceRaiSignal; + tuningDataStats?: Schema$GoogleCloudAiplatformV1TuningDataStats; /** - * Number of rewinds by controlled decoding. + * Output only. Time when the TuningJob was most recently updated. 
*/ - records?: Schema$LearningGenaiRootControlDecodingRecords; - streamRecitationResult?: Schema$LanguageLabsAidaTrustRecitationProtoStreamRecitationResult; - takedownResult?: Schema$LearningGenaiRootTakedownResult; - toxicityResult?: Schema$LearningGenaiRootToxicityResult; + updateTime?: string | null; } - export interface Schema$LearningGenaiRootGroundingMetadata { - citations?: Schema$LearningGenaiRootGroundingMetadataCitation[]; + /** + * Runtime operation information for IndexEndpointService.UndeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1UndeployIndexOperationMetadata { /** - * True if grounding is cancelled, for example, no facts being retrieved. + * The operation generic information. */ - groundingCancelled?: boolean | null; - searchQueries?: string[] | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } - export interface Schema$LearningGenaiRootGroundingMetadataCitation { + /** + * Request message for IndexEndpointService.UndeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1UndeployIndexRequest { /** - * Index in the prediction output where the citation ends (exclusive). Must be \> start_index and <= len(output). + * Required. The ID of the DeployedIndex to be undeployed from the IndexEndpoint. */ - endIndex?: number | null; + deployedIndexId?: string | null; + } + /** + * Response message for IndexEndpointService.UndeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1UndeployIndexResponse {} + /** + * Runtime operation information for EndpointService.UndeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1UndeployModelOperationMetadata { /** - * Index of the fact supporting this claim. Should be within the range of the `world_facts` in the GenerateResponse. + * The operation generic information. 
*/ - factIndex?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Request message for EndpointService.UndeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1UndeployModelRequest { /** - * Confidence score of this entailment. Value is [0,1] with 1 is the most confidence. + * Required. The ID of the DeployedModel to be undeployed from the Endpoint. */ - score?: number | null; + deployedModelId?: string | null; /** - * Index in the prediction output where the citation starts (inclusive). Must be \>= 0 and < end_index. + * If this field is provided, then the Endpoint's traffic_split will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. A DeployedModel will be successfully undeployed only if it doesn't have any traffic assigned to it when this method executes, or if this field unassigns any traffic to it. */ - startIndex?: number | null; + trafficSplit?: {[key: string]: number} | null; } - export interface Schema$LearningGenaiRootHarm { + /** + * Response message for EndpointService.UndeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1UndeployModelResponse {} + /** + * Contains model information necessary to perform batch prediction without requiring a full model import. + */ + export interface Schema$GoogleCloudAiplatformV1UnmanagedContainerModel { /** - * Please do not use, this is still under development. + * The path to the directory containing the Model artifact and any of its supporting files. 
*/ - contextualDangerous?: boolean | null; - csam?: boolean | null; - fringe?: boolean | null; - grailImageHarmType?: Schema$LearningGenaiRootHarmGrailImageHarmType; - grailTextHarmType?: Schema$LearningGenaiRootHarmGrailTextHarmType; - imageChild?: boolean | null; - imageCsam?: boolean | null; - imagePedo?: boolean | null; + artifactUri?: string | null; /** - * Image signals + * Input only. The specification of the container that is to be used when deploying this Model. */ - imagePorn?: boolean | null; - imageViolence?: boolean | null; - pqc?: boolean | null; - safetycat?: Schema$LearningGenaiRootHarmSafetyCatCategories; + containerSpec?: Schema$GoogleCloudAiplatformV1ModelContainerSpec; /** - * Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for "POSSIBLE" is 3 / 5 = 0.6 . + * Contains the schemata used in Model's predictions and explanations */ - spii?: Schema$LearningGenaiRootHarmSpiiFilter; - threshold?: number | null; - videoFrameChild?: boolean | null; - videoFrameCsam?: boolean | null; - videoFramePedo?: boolean | null; + predictSchemata?: Schema$GoogleCloudAiplatformV1PredictSchemata; + } + /** + * Runtime operation information for UpdateDeploymentResourcePool method. + */ + export interface Schema$GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata { /** - * Video frame signals + * The operation generic information. */ - videoFramePorn?: boolean | null; - videoFrameViolence?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Harm type for images + * Runtime operation information for ModelService.UpdateExplanationDataset. 
*/ - export interface Schema$LearningGenaiRootHarmGrailImageHarmType { - imageHarmType?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata { + /** + * The common part of the operation metadata. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } /** - * Harm type for text + * Request message for ModelService.UpdateExplanationDataset. */ - export interface Schema$LearningGenaiRootHarmGrailTextHarmType { - harmType?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1UpdateExplanationDatasetRequest { + /** + * The example config containing the location of the dataset. + */ + examples?: Schema$GoogleCloudAiplatformV1Examples; } /** - * LINT.ThenChange(//depot/google3/learning/genai/root/util/classifier/backends/grail/grail.cc) + * Response message of ModelService.UpdateExplanationDataset operation. */ - export interface Schema$LearningGenaiRootHarmSafetyCatCategories { - categories?: string[] | null; - } + export interface Schema$GoogleCloudAiplatformV1UpdateExplanationDatasetResponse {} /** - * LINT.IfChange + * Details of operations that perform update FeatureGroup. */ - export interface Schema$LearningGenaiRootHarmSpiiFilter { - usBankRoutingMicr?: boolean | null; - usEmployerIdentificationNumber?: boolean | null; - usSocialSecurityNumber?: boolean | null; - } - export interface Schema$LearningGenaiRootInternalMetadata { - scoredTokens?: Schema$LearningGenaiRootScoredToken[]; - } - export interface Schema$LearningGenaiRootLanguageFilterResult { + export interface Schema$GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata { /** - * False when query or response should be filtered out due to unsupported language. + * Operation metadata for FeatureGroup. */ - allowed?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Details of operations that perform update FeatureOnlineStore. 
+ */ + export interface Schema$GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata { /** - * Language of the query or response. + * Operation metadata for FeatureOnlineStore. */ - detectedLanguage?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Details of operations that perform update Feature. + */ + export interface Schema$GoogleCloudAiplatformV1UpdateFeatureOperationMetadata { /** - * Probability of the language predicted as returned by LangID. + * Operation metadata for Feature Update. */ - detectedLanguageProbability?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } - export interface Schema$LearningGenaiRootMetricOutput { - debug?: string | null; + /** + * Details of operations that perform update Featurestore. + */ + export interface Schema$GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata { /** - * Name of the metric. + * Operation metadata for Featurestore. */ - name?: string | null; - numericValue?: number | null; - status?: Schema$UtilStatusProto; - stringValue?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } - export interface Schema$LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata { + /** + * Details of operations that perform update FeatureView. + */ + export interface Schema$GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata { /** - * Latency spent on fact retrievals. There might be multiple retrievals from different fact providers. + * Operation metadata for FeatureView Update. */ - factRetrievalMillisecondsByProvider?: {[key: string]: string} | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Runtime operation information for IndexService.UpdateIndex. + */ + export interface Schema$GoogleCloudAiplatformV1UpdateIndexOperationMetadata { /** - * Latency spent on prompt2query. 
The procedure generates a search-friendly query given the original prompt. + * The operation generic information. */ - prompt2queryMilliseconds?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Latency if use GroundedGeneration service for the whole retrieval & augmentation. + * The operation metadata with regard to Matching Engine Index operation. */ - retrievalAugmentMilliseconds?: string | null; + nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; } /** - * This is per harm. + * Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob. */ - export interface Schema$LearningGenaiRootRAIOutput { - allowed?: boolean | null; - harm?: Schema$LearningGenaiRootHarm; - name?: string | null; - score?: number | null; + export interface Schema$GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata { + /** + * The operation generic information. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; } - export interface Schema$LearningGenaiRootRegexTakedownResult { + /** + * Details of operations that perform update PersistentResource. + */ + export interface Schema$GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata { /** - * False when query or response should be taken down due to match with a blocked regex, true otherwise. + * Operation metadata for PersistentResource. */ - allowed?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Regex used to decide that query or response should be taken down. Empty when query or response is kept. + * Progress Message for Update LRO */ - takedownRegex?: string | null; + progressMessage?: string | null; } - export interface Schema$LearningGenaiRootRequestMetrics { + /** + * Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool. 
+ */ + export interface Schema$GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata { /** - * Metrics for audio samples in the request. + * The operation generic information. */ - audioMetrics?: Schema$LearningGenaiRootRequestMetricsAudioMetrics; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Metrics for image samples in the request. + * Output only. The name of the SpecialistPool to which the specialists are being added. Format: `projects/{project_id\}/locations/{location_id\}/specialistPools/{specialist_pool\}` */ - imageMetrics?: Schema$LearningGenaiRootRequestMetricsImageMetrics; + specialistPool?: string | null; + } + /** + * Details of operations that perform update Tensorboard. + */ + export interface Schema$GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata { /** - * Number of text tokens extracted from the request. + * Operation metadata for Tensorboard. */ - textTokenCount?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Metadata information for NotebookService.UpgradeNotebookRuntime. + */ + export interface Schema$GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata { /** - * Total number of tokens in the request. + * The operation generic information. */ - totalTokenCount?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; /** - * Metrics for video samples in the request. + * A human-readable message that shows the intermediate progress details of NotebookRuntime. */ - videoMetrics?: Schema$LearningGenaiRootRequestMetricsVideoMetrics; + progressMessage?: string | null; } - export interface Schema$LearningGenaiRootRequestMetricsAudioMetrics { + /** + * Request message for NotebookService.UpgradeNotebookRuntime. + */ + export interface Schema$GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest {} + /** + * Details of ModelService.UploadModel operation. 
+ */ + export interface Schema$GoogleCloudAiplatformV1UploadModelOperationMetadata { /** - * Duration of the audio sample in seconds. + * The common part of the operation metadata. */ - audioDuration?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1GenericOperationMetadata; + } + /** + * Request message for ModelService.UploadModel. + */ + export interface Schema$GoogleCloudAiplatformV1UploadModelRequest { /** - * Number of tokens derived directly from audio data. + * Required. The Model to create. */ - audioTokenCount?: number | null; + model?: Schema$GoogleCloudAiplatformV1Model; /** - * Number of audio frames in the audio. + * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. */ - numAudioFrames?: number | null; - } - export interface Schema$LearningGenaiRootRequestMetricsImageMetrics { + modelId?: string | null; /** - * Number of tokens extracted from image bytes. + * Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. */ - imageTokenCount?: number | null; + parentModel?: string | null; /** - * Number of images in the request. + * Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). 
*/ - numImages?: number | null; + serviceAccount?: string | null; } - export interface Schema$LearningGenaiRootRequestMetricsVideoMetrics { + /** + * Response message of ModelService.UploadModel operation. + */ + export interface Schema$GoogleCloudAiplatformV1UploadModelResponse { /** - * Metrics associated with audio sample in the video. + * The name of the uploaded Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - audioSample?: Schema$LearningGenaiRootRequestMetricsAudioMetrics; + model?: string | null; /** - * Number of video frames in the video. + * Output only. The version ID of the model that is uploaded. */ - numVideoFrames?: number | null; + modelVersionId?: string | null; + } + /** + * Request message for IndexService.UpsertDatapoints + */ + export interface Schema$GoogleCloudAiplatformV1UpsertDatapointsRequest { /** - * Duration of the video sample in seconds. + * A list of datapoints to be created/updated. */ - videoDuration?: string | null; + datapoints?: Schema$GoogleCloudAiplatformV1IndexDatapoint[]; /** - * Number of tokens extracted from video frames. + * Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts. */ - videoFramesTokenCount?: number | null; + updateMask?: string | null; } - export interface Schema$LearningGenaiRootRequestResponseTakedownResult { + /** + * Response message for IndexService.UpsertDatapoints + */ + export interface Schema$GoogleCloudAiplatformV1UpsertDatapointsResponse {} + /** + * References an API call. It contains more information about long running operation and Jobs that are triggered by the API call. 
+ */ + export interface Schema$GoogleCloudAiplatformV1UserActionReference { /** - * False when response has to be taken down per above config. + * For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project\}/locations/{location\}/dataLabelingJobs/{data_labeling_job\}` */ - allowed?: boolean | null; + dataLabelingJob?: string | null; /** - * Regex used to match the request. + * The method name of the API RPC call. For example, "/google.cloud.aiplatform.{apiVersion\}.DatasetService.CreateDataset" */ - requestTakedownRegex?: string | null; + method?: string | null; /** - * Regex used to decide that response should be taken down. Empty when response is kept. + * For API calls that return a long running operation. Resource name of the long running operation. Format: `projects/{project\}/locations/{location\}/operations/{operation\}` */ - responseTakedownRegex?: string | null; + operation?: string | null; } /** - * Holds the final routing decision, by storing the model_config_id. And individual scores each model got. + * Value is the value of the field. */ - export interface Schema$LearningGenaiRootRoutingDecision { - metadata?: Schema$LearningGenaiRootRoutingDecisionMetadata; + export interface Schema$GoogleCloudAiplatformV1Value { + /** + * A double value. + */ + doubleValue?: number | null; + /** + * An integer value. + */ + intValue?: string | null; /** - * The selected model to route traffic to. + * A string value. */ - modelConfigId?: string | null; + stringValue?: string | null; } /** - * Debug metadata about the routing decision. + * Retrieve from Vertex AI Search datastore for grounding. 
See https://cloud.google.com/vertex-ai-search-and-conversation */ - export interface Schema$LearningGenaiRootRoutingDecisionMetadata { - scoreBasedRoutingMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataScoreBased; - tokenLengthBasedRoutingMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBased; + export interface Schema$GoogleCloudAiplatformV1VertexAISearch { + /** + * Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project\}/locations/{location\}/collections/{collection\}/dataStores/{dataStore\}` + */ + datastore?: string | null; } /** - * If we are routing using scored based configuration, then the metadata about that is available in this proto. + * Metadata describes the input video content. */ - export interface Schema$LearningGenaiRootRoutingDecisionMetadataScoreBased { - /** - * The rule that was matched. - */ - matchedRule?: Schema$LearningGenaiRootScoreBasedRoutingConfigRule; + export interface Schema$GoogleCloudAiplatformV1VideoMetadata { /** - * The score that was generated by the router i.e. the model. + * Optional. The end offset of the video. */ - score?: Schema$LearningGenaiRootScore; + endOffset?: string | null; /** - * No rules were matched & therefore used the default fallback. + * Optional. The start offset of the video. 
*/ - usedDefaultFallback?: boolean | null; - } - export interface Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBased { - modelInputTokenMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata[]; - modelMaxTokenMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata[]; + startOffset?: string | null; } - export interface Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata { - /** - * The length computed by backends using the formatter & tokenizer specific to the model - */ - computedInputTokenLength?: number | null; - modelId?: string | null; + /** + * Represents the spec of a worker pool in a job. + */ + export interface Schema$GoogleCloudAiplatformV1WorkerPoolSpec { /** - * If true, the model was selected as a fallback, since no model met requirements. + * The custom container task. */ - pickedAsFallback?: boolean | null; + containerSpec?: Schema$GoogleCloudAiplatformV1ContainerSpec; /** - * If true, the model was selected since it met the requriements. + * Disk spec. */ - selected?: boolean | null; - } - export interface Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata { - maxNumInputTokens?: number | null; - maxNumOutputTokens?: number | null; - modelId?: string | null; - } - export interface Schema$LearningGenaiRootRuleOutput { - decision?: string | null; - name?: string | null; - } - export interface Schema$LearningGenaiRootScore { - calculationType?: Schema$LearningGenaiRootCalculationType; + diskSpec?: Schema$GoogleCloudAiplatformV1DiskSpec; /** - * The internal_metadata is intended to be used by internal processors and will be cleared before returns. + * Optional. Immutable. The specification of a single machine. 
*/ - internalMetadata?: Schema$LearningGenaiRootInternalMetadata; - thresholdType?: Schema$LearningGenaiRootThresholdType; + machineSpec?: Schema$GoogleCloudAiplatformV1MachineSpec; /** - * Top candidate tokens and log probabilities at each decoding step. + * Optional. List of NFS mount spec. */ - tokensAndLogprobPerDecodingStep?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStep; - value?: number | null; - } - export interface Schema$LearningGenaiRootScoreBasedRoutingConfigRule { + nfsMounts?: Schema$GoogleCloudAiplatformV1NfsMount[]; /** - * NOTE: Hardest examples have smaller values in their routing scores. + * The Python packaged task. */ - equalOrGreaterThan?: Schema$LearningGenaiRootScore; - lessThan?: Schema$LearningGenaiRootScore; + pythonPackageSpec?: Schema$GoogleCloudAiplatformV1PythonPackageSpec; /** - * This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig. + * Optional. The number of worker replicas to use for this worker pool. */ - modelConfigId?: string | null; - } - /** - * Proto containing the results from the Universal Sentence Encoder / Other models - */ - export interface Schema$LearningGenaiRootScoredSimilarityTakedownPhrase { - phrase?: Schema$LearningGenaiRootSimilarityTakedownPhrase; - similarityScore?: number | null; + replicaCount?: string | null; } /** - * A token with its own score. + * Contains Feature values to be written for a specific entity. */ - export interface Schema$LearningGenaiRootScoredToken { + export interface Schema$GoogleCloudAiplatformV1WriteFeatureValuesPayload { /** - * Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459 + * Required. The ID of the entity. 
*/ - endTokenScore?: number | null; + entityId?: string | null; /** - * Each score is the logprob for the token in model response. + * Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future. */ - score?: number | null; - token?: string | null; + featureValues?: { + [key: string]: Schema$GoogleCloudAiplatformV1FeatureValue; + } | null; } /** - * Each SimilarityTakedownPhrase treats a logical group of blocked and allowed phrases together along with a corresponding punt If the closest matching response is of the allowed type, we allow the response If the closest matching response is of the blocked type, we block the response. eg: Blocked phrase - "All lives matter" + * Request message for FeaturestoreOnlineServingService.WriteFeatureValues. */ - export interface Schema$LearningGenaiRootSimilarityTakedownPhrase { - blockedPhrase?: string | null; - } - export interface Schema$LearningGenaiRootSimilarityTakedownResult { - /** - * False when query or response should be taken down by any of the takedown rules, true otherwise. - */ - allowed?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1WriteFeatureValuesRequest { /** - * List of similar phrases with score. Set only if allowed=false. + * Required. The entities to be written. Up to 100,000 feature values can be written across all `payloads`. */ - scoredPhrases?: Schema$LearningGenaiRootScoredSimilarityTakedownPhrase[]; + payloads?: Schema$GoogleCloudAiplatformV1WriteFeatureValuesPayload[]; } - export interface Schema$LearningGenaiRootTakedownResult { + /** + * Response message for FeaturestoreOnlineServingService.WriteFeatureValues. 
+ */ + export interface Schema$GoogleCloudAiplatformV1WriteFeatureValuesResponse {} + /** + * Request message for TensorboardService.WriteTensorboardExperimentData. + */ + export interface Schema$GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest { /** - * False when query or response should be taken down by any of the takedown rules, true otherwise. + * Required. Requests containing per-run TensorboardTimeSeries data to write. */ - allowed?: boolean | null; - regexTakedownResult?: Schema$LearningGenaiRootRegexTakedownResult; - requestResponseTakedownResult?: Schema$LearningGenaiRootRequestResponseTakedownResult; - similarityTakedownResult?: Schema$LearningGenaiRootSimilarityTakedownResult; + writeRunDataRequests?: Schema$GoogleCloudAiplatformV1WriteTensorboardRunDataRequest[]; } /** - * The type of score that bundled with a threshold, and will not be attending the final score calculation. How each score type uses the threshold can be implementation details. + * Response message for TensorboardService.WriteTensorboardExperimentData. */ - export interface Schema$LearningGenaiRootThresholdType { - scoreType?: string | null; - threshold?: number | null; - } + export interface Schema$GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse {} /** - * Results of RandomSamplingParams::top_k_logprob_per_decoding_step. + * Request message for TensorboardService.WriteTensorboardRunData. */ - export interface Schema$LearningGenaiRootTokensAndLogProbPerDecodingStep { + export interface Schema$GoogleCloudAiplatformV1WriteTensorboardRunDataRequest { /** - * Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. + * Required. The resource name of the TensorboardRun to write data to. 
Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` */ - chosenCandidates?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[]; + tensorboardRun?: string | null; /** - * Length = total number of decoding steps. + * Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. The upper limit of data points per write request is 5000. */ - topCandidates?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates[]; + timeSeriesData?: Schema$GoogleCloudAiplatformV1TimeSeriesData[]; } /** - * A candidate at a decoding step. + * Response message for TensorboardService.WriteTensorboardRunData. + */ + export interface Schema$GoogleCloudAiplatformV1WriteTensorboardRunDataResponse {} + /** + * An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. */ - export interface Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate { + export interface Schema$GoogleCloudAiplatformV1XraiAttribution { /** - * The candidate's log probability. + * Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 */ - logProbability?: number | null; + blurBaselineConfig?: Schema$GoogleCloudAiplatformV1BlurBaselineConfig; /** - * The candidate’s token value. + * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. 
Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf */ - token?: string | null; - } - /** - * Candidates with top log probabilities at each decoding step. - */ - export interface Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates { + smoothGradConfig?: Schema$GoogleCloudAiplatformV1SmoothGradConfig; /** - * Sorted by log probability in descending order. + * Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. */ - candidates?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[]; - } - /** - * A model can generate multiple signals and this captures all the generated signals for a single message. - */ - export interface Schema$LearningGenaiRootToxicityResult { - signals?: Schema$LearningGenaiRootToxicitySignal[]; - } - /** - * Proto to capture a signal generated by the toxicity model. - */ - export interface Schema$LearningGenaiRootToxicitySignal { - allowed?: boolean | null; - label?: string | null; - score?: number | null; + stepCount?: number | null; } /** - * Each TranslationRequestInfo corresponds to a request sent to the translation server. + * The response message for Locations.ListLocations. */ - export interface Schema$LearningGenaiRootTranslationRequestInfo { + export interface Schema$GoogleCloudLocationListLocationsResponse { /** - * The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. If the source language was passed, auto-detection of the language does not occur and this field is empty. + * A list of locations that matches the specified filter in the request. 
*/ - detectedLanguageCodes?: string[] | null; + locations?: Schema$GoogleCloudLocationLocation[]; /** - * The sum of the size of all the contents in the request. + * The standard List next-page token. */ - totalContentSize?: string | null; - } - export interface Schema$LearningServingLlmAtlasOutputMetadata { - requestTopic?: string | null; - source?: string | null; + nextPageToken?: string | null; } /** - * LINT.IfChange This metadata contains additional information required for debugging. + * A resource that represents a Google Cloud location. */ - export interface Schema$LearningServingLlmMessageMetadata { - atlasMetadata?: Schema$LearningServingLlmAtlasOutputMetadata; + export interface Schema$GoogleCloudLocationLocation { /** - * Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not. + * The friendly name for this location, typically a nearby city name. For example, "Tokyo". */ - classifierSummary?: Schema$LearningGenaiRootClassifierOutputSummary; + displayName?: string | null; /** - * Contains metadata related to Codey Processors. + * Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"\} */ - codeyOutput?: Schema$LearningGenaiRootCodeyOutput; - currentStreamTextLength?: number | null; + labels?: {[key: string]: string} | null; /** - * Whether the corresponding message has been deleted. + * The canonical id for this location. For example: `"us-east1"`. */ - deleted?: boolean | null; + locationId?: string | null; /** - * Metadata for filters that triggered. + * Service-specific metadata. For example the available capacity at the given location. */ - filterMeta?: Schema$LearningGenaiRootFilterMetadata[]; + metadata?: {[key: string]: any} | null; /** - * This score is finally used for ranking the message. This will be same as the score present in `Message.score` field. + * Resource name for the location, which may vary between implementations. 
For example: `"projects/example-project/locations/us-east1"` */ - finalMessageScore?: Schema$LearningGenaiRootScore; + name?: string | null; + } + /** + * Associates `members`, or principals, with a `role`. + */ + export interface Schema$GoogleIamV1Binding { /** - * NOT YET IMPLEMENTED. + * The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ - finishReason?: string | null; - groundingMetadata?: Schema$LearningGenaiRootGroundingMetadata; + condition?: Schema$GoogleTypeExpr; /** - * Applies to streaming response message only. Whether the message is a code. + * Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid\}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid\}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid\}.svc.id.goog[{namespace\}/{kubernetes-sa\}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). 
For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid\}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain\}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/group/{group_id\}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/x`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/group/{group_id\}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/x`: All identities in a workload identity pool. * `deleted:user:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. 
If the user is recovered, this value reverts to `user:{emailid\}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid\}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid\}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`. */ - isCode?: boolean | null; + members?: string[] | null; /** - * Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty. + * Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles). */ - isFallback?: boolean | null; + role?: string | null; + } + /** + * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. 
A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] \}, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", \} \} ], "etag": "BwWWja0YfJA=", "version": 3 \} ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). + */ + export interface Schema$GoogleIamV1Policy { /** - * Result from nlp_saft DetectLanguage method. 
Currently the predicted language code and language probability is used. + * Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`. */ - langidResult?: Schema$NlpSaftLangIdResult; + bindings?: Schema$GoogleIamV1Binding[]; /** - * Detected language. + * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. */ - language?: string | null; + etag?: string | null; /** - * The LM prefix used to generate this response. + * Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. 
This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ - lmPrefix?: string | null; + version?: number | null; + } + /** + * Request message for `SetIamPolicy` method. + */ + export interface Schema$GoogleIamV1SetIamPolicyRequest { /** - * FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames. + * REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them. */ - lmrootInternalRequestMetrics?: Schema$LearningGenaiRootRequestMetrics; + policy?: Schema$GoogleIamV1Policy; + } + /** + * Response message for `TestIamPermissions` method. + */ + export interface Schema$GoogleIamV1TestIamPermissionsResponse { /** - * Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. + * A subset of `TestPermissionsRequest.permissions` that the caller is allowed. 
*/ - mmRecitationResult?: Schema$LearningGenaiRecitationMMRecitationCheckResult; + permissions?: string[] | null; + } + /** + * The response message for Operations.ListOperations. + */ + export interface Schema$GoogleLongrunningListOperationsResponse { /** - * Number of Controlled Decoding rewind and repeats that have happened for this response. + * The standard List next-page token. */ - numRewinds?: number | null; + nextPageToken?: string | null; /** - * The original text generated by LLM. This is the raw output for debugging purposes. + * A list of operations that matches the specified filter in the request. */ - originalText?: string | null; + operations?: Schema$GoogleLongrunningOperation[]; + } + /** + * This resource represents a long-running operation that is the result of a network API call. + */ + export interface Schema$GoogleLongrunningOperation { /** - * Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only. + * If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. */ - perStreamDecodedTokenCount?: number | null; + done?: boolean | null; /** - * Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only. + * The error result of the operation in case of failure or cancellation. */ - perStreamReturnedTokenCount?: number | null; + error?: Schema$GoogleRpcStatus; /** - * Results of running RAI on the query or this response candidate. One output per rai_config. It will be populated regardless of whether the threshold is exceeded or not. + * Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. 
Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. */ - raiOutputs?: Schema$LearningGenaiRootRAIOutput[]; + metadata?: {[key: string]: any} | null; /** - * Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome. + * The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id\}`. */ - recitationResult?: Schema$LearningGenaiRecitationRecitationResult; + name?: string | null; /** - * All the different scores for a message are logged here. + * The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. */ - scores?: Schema$LearningGenaiRootScore[]; + response?: {[key: string]: any} | null; + } + /** + * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \} + */ + export interface Schema$GoogleProtobufEmpty {} + /** + * The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + */ + export interface Schema$GoogleRpcStatus { /** - * Whether the response is terminated during streaming return. Only used for streaming requests. + * The status code, which should be an enum value of google.rpc.Code. */ - streamTerminated?: boolean | null; + code?: number | null; /** - * Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate. + * A list of messages that carry the error details. There is a common set of message types for APIs to use. */ - totalDecodedTokenCount?: number | null; + details?: Array<{[key: string]: any}> | null; /** - * Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only. + * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ - totalReturnedTokenCount?: number | null; + message?: string | null; + } + /** + * Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. 
When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); \} public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); \} return resultBuilder.build(); \} // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; \} return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; \} static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; \} Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; \} [result autorelease]; return result; \} // ... Example (JavaScript): // ... 
var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); \} var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); \}; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); \} resultBuilder.push(hexString); return resultBuilder.join(''); \}; // ... + */ + export interface Schema$GoogleTypeColor { /** - * Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. It will be empty if non of the signals requested need translation. + * The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). */ - translatedUserPrompts?: string[] | null; + alpha?: number | null; /** - * The metadata from Vertex SafetyCat processors + * The amount of blue in the color as a value in the interval [0, 1]. 
*/ - vertexRaiResult?: Schema$CloudAiNlLlmProtoServiceRaiResult; - } - export interface Schema$NlpSaftLangIdLocalesResult { + blue?: number | null; /** - * List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be ["pt-BR", "pt-PT"], in that order. May be empty, indicating that the model did not predict any acceptable locales. + * The amount of green in the color as a value in the interval [0, 1]. */ - predictions?: Schema$NlpSaftLangIdLocalesResultLocale[]; - } - export interface Schema$NlpSaftLangIdLocalesResultLocale { + green?: number | null; /** - * A BCP 47 language code that includes region information. For example, "pt-BR" or "pt-PT". This field will always be populated. + * The amount of red in the color as a value in the interval [0, 1]. */ - languageCode?: string | null; + red?: number | null; } - export interface Schema$NlpSaftLangIdResult { + /** + * Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp + */ + export interface Schema$GoogleTypeDate { /** - * The version of the model used to create these annotations. + * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. 
*/ - modelVersion?: string | null; + day?: number | null; /** - * This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability. + * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ - predictions?: Schema$NlpSaftLanguageSpan[]; + month?: number | null; /** - * This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty. + * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ - spanPredictions?: Schema$NlpSaftLanguageSpanSequence[]; + year?: number | null; } - export interface Schema$NlpSaftLanguageSpan { - end?: number | null; + /** + * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." 
expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + */ + export interface Schema$GoogleTypeExpr { /** - * A BCP 47 language code for this span. + * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ - languageCode?: string | null; + description?: string | null; /** - * Optional field containing any information that was predicted about the specific locale(s) of the span. + * Textual representation of an expression in Common Expression Language syntax. */ - locales?: Schema$NlpSaftLangIdLocalesResult; + expression?: string | null; /** - * A probability associated with this prediction. + * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ - probability?: number | null; + location?: string | null; /** - * Start and end byte offsets, inclusive, within the given input string. A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. If both are unset then this LanguageSpan applies to the entire input. + * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ - start?: number | null; + title?: string | null; } - export interface Schema$NlpSaftLanguageSpanSequence { + /** + * Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time. 
+ */ + export interface Schema$GoogleTypeInterval { /** - * A sequence of LanguageSpan objects, each assigning a language to a subspan of the input. + * Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end. */ - languageSpans?: Schema$NlpSaftLanguageSpan[]; + endTime?: string | null; /** - * The probability of this sequence of LanguageSpans. + * Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start. */ - probability?: number | null; + startTime?: string | null; } /** - * This is proto2's version of MessageSet. - */ - export interface Schema$Proto2BridgeMessageSet {} - /** - * Wire-format for a Status object + * Represents an amount of money with its currency type. */ - export interface Schema$UtilStatusProto { - /** - * The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be. - */ - canonicalCode?: number | null; - /** - * Numeric code drawn from the space specified below. Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto - */ - code?: number | null; + export interface Schema$GoogleTypeMoney { /** - * Detail message + * The three-letter currency code defined in ISO 4217. */ - message?: string | null; + currencyCode?: string | null; /** - * message_set associates an arbitrary proto message with the status. + * Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. 
*/ - messageSet?: Schema$Proto2BridgeMessageSet; + nanos?: number | null; /** - * The following are usually only present when code != 0 Space to which this status belongs + * The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. */ - space?: string | null; + units?: string | null; } export class Resource$Projects { diff --git a/src/apis/aiplatform/v1beta1.ts b/src/apis/aiplatform/v1beta1.ts index c7c2c99df0..6b4be1afdc 100644 --- a/src/apis/aiplatform/v1beta1.ts +++ b/src/apis/aiplatform/v1beta1.ts @@ -128,15 +128,6 @@ export namespace aiplatform_v1beta1 { } } - /** - * Video embedding response. - */ - export interface Schema$CloudAiLargeModelsVisionEmbedVideoResponse { - /** - * The embedding vector for the video. - */ - videoEmbeddings?: any[] | null; - } /** * Details for filtered input text. */ @@ -238,15 +229,6 @@ export namespace aiplatform_v1beta1 { */ video?: Schema$CloudAiLargeModelsVisionVideo; } - /** - * Generate media content response - */ - export interface Schema$CloudAiLargeModelsVisionMediaGenerateContentResponse { - /** - * Response to the user's request. - */ - response?: Schema$CloudAiNlLlmProtoServiceGenerateMultiModalResponse; - } export interface Schema$CloudAiLargeModelsVisionNamedBoundingBox { classes?: string[] | null; entities?: string[] | null; @@ -266,41 +248,6 @@ export namespace aiplatform_v1beta1 { */ scores?: number[] | null; } - /** - * Video reasoning response. - */ - export interface Schema$CloudAiLargeModelsVisionReasonVideoResponse { - /** - * Generated text responses. The generated responses for different segments within the same video. - */ - responses?: Schema$CloudAiLargeModelsVisionReasonVideoResponseTextResponse[]; - } - /** - * Contains text that is the response of the video captioning. - */ - export interface Schema$CloudAiLargeModelsVisionReasonVideoResponseTextResponse { - /** - * Partition of the caption's video in time. This field is intended for video captioning. 
To represent the start time and end time of the caption's video. - */ - relativeTemporalPartition?: Schema$CloudAiLargeModelsVisionRelativeTemporalPartition; - /** - * Text information - */ - text?: string | null; - } - /** - * For ease of use, assume that the start_offset is inclusive and the end_offset is exclusive. In mathematical terms, the partition would be written as [start_offset, end_offset). - */ - export interface Schema$CloudAiLargeModelsVisionRelativeTemporalPartition { - /** - * End time offset of the partition. - */ - endOffset?: string | null; - /** - * Start time offset of the partition. - */ - startOffset?: string | null; - } export interface Schema$CloudAiLargeModelsVisionSemanticFilterResponse { /** * Class labels of the bounding boxes that failed the semantic filtering. Bounding box coordinates. @@ -324,2436 +271,2557 @@ export namespace aiplatform_v1beta1 { */ video?: string | null; } - export interface Schema$CloudAiNlLlmProtoServiceCandidate { - /** - * Source attribution of the generated content. - */ - citationMetadata?: Schema$CloudAiNlLlmProtoServiceCitationMetadata; - /** - * Content of the candidate. - */ - content?: Schema$CloudAiNlLlmProtoServiceContent; - /** - * A string that describes the filtering behavior in more detail. Only filled when reason is set. - */ - finishMessage?: string | null; - /** - * The reason why the model stopped generating tokens. - */ - finishReason?: string | null; + /** + * Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. 
string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; \} service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); \} Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); \} Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. + */ + export interface Schema$GoogleApiHttpBody { /** - * Grounding metadata. Combine with the facts list from response to generate grounding citations for this choice. + * The HTTP Content-Type header value specifying the content type of the body. */ - groundingMetadata?: Schema$LearningGenaiRootGroundingMetadata; + contentType?: string | null; /** - * Index of the candidate. + * The HTTP request/response body as raw binary. */ - index?: number | null; + data?: string | null; /** - * Safety ratings of the generated content. + * Application specific response metadata. Must be set in the first response for streaming APIs. */ - safetyRatings?: Schema$CloudAiNlLlmProtoServiceSafetyRating[]; + extensions?: Array<{[key: string]: any}> | null; } /** - * Source attributions for content. + * Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy. */ - export interface Schema$CloudAiNlLlmProtoServiceCitation { + export interface Schema$GoogleCloudAiplatformV1beta1ActiveLearningConfig { /** - * End index into the content. + * Max number of human labeled DataItems. */ - endIndex?: number | null; + maxDataItemCount?: string | null; /** - * License of the attribution. 
+ * Max percent of total DataItems for human labeling. */ - license?: string | null; + maxDataItemPercentage?: number | null; /** - * Publication date of the attribution. + * Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy. */ - publicationDate?: Schema$GoogleTypeDate; + sampleConfig?: Schema$GoogleCloudAiplatformV1beta1SampleConfig; /** - * Start index into the content. + * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. */ - startIndex?: number | null; + trainingConfig?: Schema$GoogleCloudAiplatformV1beta1TrainingConfig; + } + /** + * Request message for MetadataService.AddContextArtifactsAndExecutions. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsRequest { /** - * Title of the attribution. + * The resource names of the Artifacts to attribute to the Context. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/artifacts/{artifact\}` */ - title?: string | null; + artifacts?: string[] | null; /** - * Url reference of the attribution. + * The resource names of the Executions to associate with the Context. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/executions/{execution\}` */ - uri?: string | null; + executions?: string[] | null; } /** - * A collection of source attributions for a piece of content. + * Response message for MetadataService.AddContextArtifactsAndExecutions. */ - export interface Schema$CloudAiNlLlmProtoServiceCitationMetadata { - /** - * List of citations. - */ - citations?: Schema$CloudAiNlLlmProtoServiceCitation[]; - } + export interface Schema$GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsResponse {} /** - * The content of a single message from a participant. 
+ * Request message for MetadataService.AddContextChildren. */ - export interface Schema$CloudAiNlLlmProtoServiceContent { - /** - * If true, the content is from a cached content. - */ - isCached?: boolean | null; - /** - * The parts of the message. - */ - parts?: Schema$CloudAiNlLlmProtoServicePart[]; + export interface Schema$GoogleCloudAiplatformV1beta1AddContextChildrenRequest { /** - * The role of the current conversation participant. + * The resource names of the child Contexts. */ - role?: string | null; + childContexts?: string[] | null; } /** - * A condense version of WorldFact (assistant/boq/lamda/factuality/proto/factuality.proto) to propagate the essential information about the fact used in factuality to the upstream caller. + * Response message for MetadataService.AddContextChildren. */ - export interface Schema$CloudAiNlLlmProtoServiceFact { - /** - * Query that is used to retrieve this fact. - */ - query?: string | null; - /** - * If present, the summary/snippet of the fact. - */ - summary?: string | null; - /** - * If present, it refers to the title of this fact. - */ - title?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1AddContextChildrenResponse {} + /** + * Request message for MetadataService.AddExecutionEvents. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AddExecutionEventsRequest { /** - * If present, this URL links to the webpage of the fact. + * The Events to create and add. */ - url?: string | null; + events?: Schema$GoogleCloudAiplatformV1beta1Event[]; } /** - * Function call details. + * Response message for MetadataService.AddExecutionEvents. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AddExecutionEventsResponse {} + /** + * Request message for VizierService.AddTrialMeasurement. */ - export interface Schema$CloudAiNlLlmProtoServiceFunctionCall { - /** - * The function parameters and values in JSON format. 
- */ - args?: {[key: string]: any} | null; + export interface Schema$GoogleCloudAiplatformV1beta1AddTrialMeasurementRequest { /** - * Required. The name of the function to call. + * Required. The measurement to be added to a Trial. */ - name?: string | null; + measurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; } /** - * Function response details. + * Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem. */ - export interface Schema$CloudAiNlLlmProtoServiceFunctionResponse { + export interface Schema$GoogleCloudAiplatformV1beta1Annotation { /** - * Required. The name of the function to call. + * Output only. The source of the Annotation. */ - name?: string | null; + annotationSource?: Schema$GoogleCloudAiplatformV1beta1UserActionReference; /** - * Required. The function response in JSON object format. + * Output only. Timestamp when this Annotation was created. */ - response?: {[key: string]: any} | null; - } - export interface Schema$CloudAiNlLlmProtoServiceGenerateMultiModalResponse { + createTime?: string | null; /** - * Possible candidate responses to the conversation up until this point. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - candidates?: Schema$CloudAiNlLlmProtoServiceCandidate[]; + etag?: string | null; /** - * Debug information containing message metadata. Clients should not consume this field, and this is only populated for Flow Runner path. + * Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. 
System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Annotation: * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output only, its value is the payload_schema's title. */ - debugMetadata?: Schema$CloudAiNlLlmProtoServiceMessageMetadata; + labels?: {[key: string]: string} | null; /** - * External facts retrieved for factuality/grounding. + * Output only. Resource name of the Annotation. */ - facts?: Schema$CloudAiNlLlmProtoServiceFact[]; + name?: string | null; /** - * Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. + * Required. The schema of the payload can be found in payload_schema. */ - promptFeedback?: Schema$CloudAiNlLlmProtoServicePromptFeedback; + payload?: any | null; /** - * Billable prediction metrics. + * Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata. */ - reportingMetrics?: Schema$IntelligenceCloudAutomlXpsReportingMetrics; + payloadSchemaUri?: string | null; /** - * Usage metadata about the response(s). + * Output only. Timestamp when this Annotation was last updated. */ - usageMetadata?: Schema$CloudAiNlLlmProtoServiceUsageMetadata; + updateTime?: string | null; } - export interface Schema$CloudAiNlLlmProtoServiceMessageMetadata { + /** + * Identifies a concept with which DataItems may be annotated with. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1AnnotationSpec { + /** + * Output only. Timestamp when this AnnotationSpec was created. + */ + createTime?: string | null; /** - * Factuality-related debug metadata. + * Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - factualityDebugMetadata?: Schema$LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata; + displayName?: string | null; /** - * Filter metadata of the input messages. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - inputFilterInfo?: Schema$LearningServingLlmMessageMetadata; + etag?: string | null; /** - * This score is generated by the router model to decide which model to use + * Output only. Resource name of the AnnotationSpec. */ - modelRoutingDecision?: Schema$LearningGenaiRootRoutingDecision; + name?: string | null; /** - * Filter metadata of the output messages. + * Output only. Timestamp when AnnotationSpec was last updated. */ - outputFilterInfo?: Schema$LearningServingLlmMessageMetadata[]; + updateTime?: string | null; } /** - * A single part of a message. + * Instance of a general artifact. */ - export interface Schema$CloudAiNlLlmProtoServicePart { - /** - * Document metadata. The metadata should only be used by the Cloud LLM when supporting document mime types. It will only be populated when this image input part is converted from a document input part. - */ - documentMetadata?: Schema$CloudAiNlLlmProtoServicePartDocumentMetadata; + export interface Schema$GoogleCloudAiplatformV1beta1Artifact { /** - * URI-based data. + * Output only. Timestamp when this Artifact was created. */ - fileData?: Schema$CloudAiNlLlmProtoServicePartFileData; + createTime?: string | null; /** - * Function call data. 
+ * Description of the Artifact */ - functionCall?: Schema$CloudAiNlLlmProtoServiceFunctionCall; + description?: string | null; /** - * Function response data. + * User provided display name of the Artifact. May be up to 128 Unicode characters. */ - functionResponse?: Schema$CloudAiNlLlmProtoServiceFunctionResponse; + displayName?: string | null; /** - * Inline bytes data + * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - inlineData?: Schema$CloudAiNlLlmProtoServicePartBlob; + etag?: string | null; /** - * Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields. + * The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded). */ - lmRootMetadata?: Schema$CloudAiNlLlmProtoServicePartLMRootMetadata; + labels?: {[key: string]: string} | null; /** - * Text input. + * Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. */ - text?: string | null; + metadata?: {[key: string]: any} | null; /** - * Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + * Output only. The resource name of the Artifact. */ - videoMetadata?: Schema$CloudAiNlLlmProtoServicePartVideoMetadata; - } - /** - * Represents arbitrary blob data input. - */ - export interface Schema$CloudAiNlLlmProtoServicePartBlob { + name?: string | null; /** - * Inline data. + * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. 
And both are used together as unique identifiers to identify schemas within the local metadata store. */ - data?: string | null; + schemaTitle?: string | null; /** - * The mime type corresponding to this input. + * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - mimeType?: string | null; + schemaVersion?: string | null; /** - * Original file data where the blob comes from. + * The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions. */ - originalFileData?: Schema$CloudAiNlLlmProtoServicePartFileData; - } - /** - * Metadata describes the original input document content. - */ - export interface Schema$CloudAiNlLlmProtoServicePartDocumentMetadata { + state?: string | null; /** - * The original document blob. + * Output only. Timestamp when this Artifact was last updated. */ - originalDocumentBlob?: Schema$CloudAiNlLlmProtoServicePartBlob; + updateTime?: string | null; /** - * The (1-indexed) page number of the image in the original document. The first page carries the original document content and mime type. + * The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file. */ - pageNumber?: number | null; + uri?: string | null; } /** - * Represents file data. + * Metadata information for NotebookService.AssignNotebookRuntime. */ - export interface Schema$CloudAiNlLlmProtoServicePartFileData { + export interface Schema$GoogleCloudAiplatformV1beta1AssignNotebookRuntimeOperationMetadata { /** - * Inline data. + * The operation generic information. 
*/ - fileUri?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * The mime type corresponding to this input. + * A human-readable message that shows the intermediate progress details of NotebookRuntime. */ - mimeType?: string | null; + progressMessage?: string | null; } /** - * Metadata provides extra info for building the LM Root request. + * Request message for NotebookService.AssignNotebookRuntime. */ - export interface Schema$CloudAiNlLlmProtoServicePartLMRootMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1AssignNotebookRuntimeRequest { /** - * Chunk id that will be used when mapping the part to the LM Root's chunk. + * Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment. */ - chunkId?: string | null; - } - /** - * Metadata describes the input video content. - */ - export interface Schema$CloudAiNlLlmProtoServicePartVideoMetadata { + notebookRuntime?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntime; /** - * The end offset of the video. + * Optional. User specified ID for the notebook runtime. */ - endOffset?: string | null; + notebookRuntimeId?: string | null; /** - * The start offset of the video. + * Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one). */ - startOffset?: string | null; + notebookRuntimeTemplate?: string | null; } /** - * Content filter results for a prompt sent in the request. + * Attribution that explains a particular prediction output. */ - export interface Schema$CloudAiNlLlmProtoServicePromptFeedback { + export interface Schema$GoogleCloudAiplatformV1beta1Attribution { /** - * Blocked reason. + * Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. 
* For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. */ - blockReason?: string | null; + approximationError?: number | null; /** - * A readable block reason message. + * Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank \> 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. */ - blockReasonMessage?: string | null; + baselineOutputValue?: number | null; /** - * Safety ratings. + * Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). 
*/ - safetyRatings?: Schema$CloudAiNlLlmProtoServiceSafetyRating[]; - } - /** - * The RAI results for a given text. Next ID: 12 - */ - export interface Schema$CloudAiNlLlmProtoServiceRaiResult { + featureAttributions?: any | null; /** - * Recitation result from Aida recitation checker. + * Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. */ - aidaRecitationResult?: Schema$LanguageLabsAidaTrustRecitationProtoRecitationResult; + instanceOutputValue?: number | null; /** - * Use `triggered_blocklist`. + * Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index. */ - blocked?: boolean | null; + outputDisplayName?: string | null; /** - * The error codes indicate which RAI filters block the response. + * Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. */ - errorCodes?: number[] | null; + outputIndex?: number[] | null; /** - * Whether the text should be filtered and not shown to the end user. This is determined based on a combination of `triggered_recitation`, `triggered_blocklist`, `language_filter_result`, and `triggered_safety_filter`. + * Output only. Name of the explain output. 
Specified as the key in ExplanationMetadata.outputs. */ - filtered?: boolean | null; + outputName?: string | null; + } + /** + * Auth configuration to run the extension. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AuthConfig { /** - * Language filter result from SAFT LangId. + * Config for API key auth. */ - languageFilterResult?: Schema$LearningGenaiRootLanguageFilterResult; + apiKeyConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig; /** - * Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. + * Type of auth scheme. */ - mmRecitationResult?: Schema$LearningGenaiRecitationMMRecitationCheckResult; + authType?: string | null; /** - * The RAI signals for the text. + * Config for Google Service Account auth. */ - raiSignals?: Schema$CloudAiNlLlmProtoServiceRaiSignal[]; + googleServiceAccountConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig; /** - * Translation request info during RAI for debugging purpose. Each TranslationRequestInfo corresponds to a request sent to the translation server. + * Config for HTTP Basic auth. */ - translationRequestInfos?: Schema$LearningGenaiRootTranslationRequestInfo[]; - /** - * Whether the text triggered the blocklist. - */ - triggeredBlocklist?: boolean | null; + httpBasicAuthConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig; /** - * Whether the text should be blocked by the recitation result from Aida recitation checker. It is determined from aida_recitation_result. + * Config for user oauth. */ - triggeredRecitation?: boolean | null; + oauthConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigOauthConfig; /** - * Whether the text triggered the safety filter. Currently, this is due to CSAI triggering or one of four categories (derogatory, sexual, toxic, violent) having a score over the filter threshold. + * Config for user OIDC auth. 
*/ - triggeredSafetyFilter?: boolean | null; + oidcConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigOidcConfig; } /** - * An RAI signal for a single category. + * Config for authentication with API key. */ - export interface Schema$CloudAiNlLlmProtoServiceRaiSignal { - /** - * The confidence level for the RAI category. - */ - confidence?: string | null; - /** - * Whether the category is flagged as being present. Currently, this is set to true if score \>= 0.5. - */ - flagged?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig { /** - * The influential terms that could potentially block the response. + * Required. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project\}/secrets/{secrete\}/versions/{version\}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. */ - influentialTerms?: Schema$CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm[]; + apiKeySecret?: string | null; /** - * The RAI category. + * Required. The location of the API key. */ - raiCategory?: string | null; + httpElementLocation?: string | null; /** - * The score for the category, in the range [0.0, 1.0]. + * Required. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. */ - score?: number | null; + name?: string | null; } /** - * The influential term that could potentially block the response. + * Config for Google Service Account Authentication. */ - export interface Schema$CloudAiNlLlmProtoServiceRaiSignalInfluentialTerm { + export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig { /** - * The beginning offset of the influential term. + * Optional. The service account that the extension execution service runs as. 
- If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. */ - beginOffset?: number | null; + serviceAccount?: string | null; + } + /** + * Config for HTTP Basic Authentication. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig { /** - * The confidence score of the influential term. + * Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project\}/secrets/{secrete\}/versions/{version\}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. */ - confidence?: number | null; + credentialSecret?: string | null; + } + /** + * Config for user oauth. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigOauthConfig { /** - * The source of the influential term, prompt or response. + * Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. */ - source?: string | null; + accessToken?: string | null; /** - * The influential term. + * The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. */ - term?: string | null; + serviceAccount?: string | null; } /** - * Safety rating corresponding to the generated content. 
+ * Config for user OIDC auth. */ - export interface Schema$CloudAiNlLlmProtoServiceSafetyRating { - /** - * Indicates whether the content was filtered out because of this rating. - */ - blocked?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigOidcConfig { /** - * Harm category. + * OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. */ - category?: string | null; + idToken?: string | null; /** - * The influential terms that could potentially block the response. + * The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). */ - influentialTerms?: Schema$CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm[]; + serviceAccount?: string | null; + } + /** + * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AutomaticResources { /** - * Harm probability levels in the content. + * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). 
If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. */ - probability?: string | null; + maxReplicaCount?: number | null; /** - * Harm probability score. + * Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */ - probabilityScore?: number | null; + minReplicaCount?: number | null; + } + /** + * The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count. + */ + export interface Schema$GoogleCloudAiplatformV1beta1AutoscalingMetricSpec { /** - * Harm severity levels in the content. + * Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ - severity?: string | null; + metricName?: string | null; /** - * Harm severity score. + * The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. */ - severityScore?: number | null; + target?: number | null; } /** - * The influential term that could potentially block the response. + * The storage details for Avro input content. 
*/ - export interface Schema$CloudAiNlLlmProtoServiceSafetyRatingInfluentialTerm { + export interface Schema$GoogleCloudAiplatformV1beta1AvroSource { /** - * The beginning offset of the influential term. + * Required. Google Cloud Storage location. */ - beginOffset?: number | null; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + } + /** + * Request message for PipelineService.BatchCancelPipelineJobs. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsRequest { /** - * The confidence score of the influential term. + * Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` */ - confidence?: number | null; + names?: string[] | null; + } + /** + * Response message for PipelineService.BatchCancelPipelineJobs. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsResponse { /** - * The source of the influential term, prompt or response. + * PipelineJobs cancelled. */ - source?: string | null; + pipelineJobs?: Schema$GoogleCloudAiplatformV1beta1PipelineJob[]; + } + /** + * Details of operations that perform batch create Features. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateFeaturesOperationMetadata { /** - * The influential term. + * Operation metadata for Feature. */ - term?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Usage metadata about response(s). + * Request message for FeaturestoreService.BatchCreateFeatures. */ - export interface Schema$CloudAiNlLlmProtoServiceUsageMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest { /** - * Number of tokens in the response(s). + * Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. 
The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message. */ - candidatesTokenCount?: number | null; + requests?: Schema$GoogleCloudAiplatformV1beta1CreateFeatureRequest[]; + } + /** + * Response message for FeaturestoreService.BatchCreateFeatures. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateFeaturesResponse { /** - * Number of tokens in the request. + * The Features created. */ - promptTokenCount?: number | null; - totalTokenCount?: number | null; + features?: Schema$GoogleCloudAiplatformV1beta1Feature[]; } /** - * Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; \} service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); \} Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); \} Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. + * Request message for TensorboardService.BatchCreateTensorboardRuns. 
*/ - export interface Schema$GoogleApiHttpBody { + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsRequest { /** - * The HTTP Content-Type header value specifying the content type of the body. + * Required. The request message specifying the TensorboardRuns to create. A maximum of 1000 TensorboardRuns can be created in a batch. */ - contentType?: string | null; + requests?: Schema$GoogleCloudAiplatformV1beta1CreateTensorboardRunRequest[]; + } + /** + * Response message for TensorboardService.BatchCreateTensorboardRuns. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsResponse { /** - * The HTTP request/response body as raw binary. + * The created TensorboardRuns. */ - data?: string | null; + tensorboardRuns?: Schema$GoogleCloudAiplatformV1beta1TensorboardRun[]; + } + /** + * Request message for TensorboardService.BatchCreateTensorboardTimeSeries. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesRequest { /** - * Application specific response metadata. Must be set in the first response for streaming APIs. + * Required. The request message specifying the TensorboardTimeSeries to create. A maximum of 1000 TensorboardTimeSeries can be created in a batch. */ - extensions?: Array<{[key: string]: any}> | null; + requests?: Schema$GoogleCloudAiplatformV1beta1CreateTensorboardTimeSeriesRequest[]; } /** - * Parameters that configure the active learning pipeline. Active learning will label the data incrementally by several iterations. For every iteration, it will select a batch of data based on the sampling strategy. + * Response message for TensorboardService.BatchCreateTensorboardTimeSeries. */ - export interface Schema$GoogleCloudAiplatformV1beta1ActiveLearningConfig { + export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesResponse { /** - * Max number of human labeled DataItems. + * The created TensorboardTimeSeries. 
*/ - maxDataItemCount?: string | null; + tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries[]; + } + /** + * A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchDedicatedResources { /** - * Max percent of total DataItems for human labeling. + * Required. Immutable. The specification of a single machine. */ - maxDataItemPercentage?: number | null; + machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; /** - * Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy. + * Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10. */ - sampleConfig?: Schema$GoogleCloudAiplatformV1beta1SampleConfig; + maxReplicaCount?: number | null; /** - * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. + * Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count */ - trainingConfig?: Schema$GoogleCloudAiplatformV1beta1TrainingConfig; + startingReplicaCount?: number | null; } /** - * Request message for MetadataService.AddContextArtifactsAndExecutions. + * Request message for PipelineService.BatchDeletePipelineJobs. */ - export interface Schema$GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsRequest { - /** - * The resource names of the Artifacts to attribute to the Context. 
Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/artifacts/{artifact\}` - */ - artifacts?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsRequest { /** - * The resource names of the Executions to associate with the Context. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadatastore\}/executions/{execution\}` + * Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` */ - executions?: string[] | null; + names?: string[] | null; } /** - * Response message for MetadataService.AddContextArtifactsAndExecutions. - */ - export interface Schema$GoogleCloudAiplatformV1beta1AddContextArtifactsAndExecutionsResponse {} - /** - * Request message for MetadataService.AddContextChildren. + * Response message for PipelineService.BatchDeletePipelineJobs. */ - export interface Schema$GoogleCloudAiplatformV1beta1AddContextChildrenRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsResponse { /** - * The resource names of the child Contexts. + * PipelineJobs deleted. */ - childContexts?: string[] | null; + pipelineJobs?: Schema$GoogleCloudAiplatformV1beta1PipelineJob[]; } /** - * Response message for MetadataService.AddContextChildren. + * Request message for ModelService.BatchImportEvaluatedAnnotations */ - export interface Schema$GoogleCloudAiplatformV1beta1AddContextChildrenResponse {} + export interface Schema$GoogleCloudAiplatformV1beta1BatchImportEvaluatedAnnotationsRequest { + /** + * Required. Evaluated annotations resource to be imported. + */ + evaluatedAnnotations?: Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotation[]; + } /** - * Request message for MetadataService.AddExecutionEvents. 
+ * Response message for ModelService.BatchImportEvaluatedAnnotations */ - export interface Schema$GoogleCloudAiplatformV1beta1AddExecutionEventsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BatchImportEvaluatedAnnotationsResponse { /** - * The Events to create and add. + * Output only. Number of EvaluatedAnnotations imported. */ - events?: Schema$GoogleCloudAiplatformV1beta1Event[]; + importedEvaluatedAnnotationsCount?: number | null; } /** - * Response message for MetadataService.AddExecutionEvents. + * Request message for ModelService.BatchImportModelEvaluationSlices */ - export interface Schema$GoogleCloudAiplatformV1beta1AddExecutionEventsResponse {} + export interface Schema$GoogleCloudAiplatformV1beta1BatchImportModelEvaluationSlicesRequest { + /** + * Required. Model evaluation slice resource to be imported. + */ + modelEvaluationSlices?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSlice[]; + } /** - * Request message for VizierService.AddTrialMeasurement. + * Response message for ModelService.BatchImportModelEvaluationSlices */ - export interface Schema$GoogleCloudAiplatformV1beta1AddTrialMeasurementRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BatchImportModelEvaluationSlicesResponse { /** - * Required. The measurement to be added to a Trial. + * Output only. List of imported ModelEvaluationSlice.name. */ - measurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; + importedModelEvaluationSlices?: string[] | null; } /** - * Used to assign specific AnnotationSpec to a particular area of a DataItem or the whole part of the DataItem. + * Runtime operation information for MigrationService.BatchMigrateResources. */ - export interface Schema$GoogleCloudAiplatformV1beta1Annotation { + export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesOperationMetadata { /** - * Output only. The source of the Annotation. + * The common part of the operation metadata. 
*/ - annotationSource?: Schema$GoogleCloudAiplatformV1beta1UserActionReference; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Output only. Timestamp when this Annotation was created. + * Partial results that reflect the latest migration operation progress. */ - createTime?: string | null; + partialResults?: Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesOperationMetadataPartialResult[]; + } + /** + * Represents a partial result in batch migration operation for one MigrateResourceRequest. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesOperationMetadataPartialResult { /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Migrated dataset resource name. */ - etag?: string | null; + dataset?: string | null; /** - * Optional. The labels with user-defined metadata to organize your Annotations. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Annotation(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Annotation: * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's annotation set this Annotation belongs to. If not set, the Annotation is not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output only, its value is the payload_schema's title. + * The error result of the migration request in case of failure. */ - labels?: {[key: string]: string} | null; + error?: Schema$GoogleRpcStatus; /** - * Output only. Resource name of the Annotation. + * Migrated model resource name. 
*/ - name?: string | null; + model?: string | null; /** - * Required. The schema of the payload can be found in payload_schema. + * It's the same as the value in MigrateResourceRequest.migrate_resource_requests. */ - payload?: any | null; + request?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequest; + } + /** + * Request message for MigrationService.BatchMigrateResources. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesRequest { /** - * Required. Google Cloud Storage URI points to a YAML file describing payload. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's metadata. + * Required. The request messages specifying the resources to migrate. They must be in the same location as the destination. Up to 50 resources can be migrated in one batch. */ - payloadSchemaUri?: string | null; + migrateResourceRequests?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequest[]; + } + /** + * Response message for MigrationService.BatchMigrateResources. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesResponse { /** - * Output only. Timestamp when this Annotation was last updated. + * Successfully migrated resources. */ - updateTime?: string | null; + migrateResourceResponses?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceResponse[]; } /** - * Identifies a concept with which DataItems may be annotated with. + * A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1AnnotationSpec { + export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJob { /** - * Output only. Timestamp when this AnnotationSpec was created. + * Output only. Statistics on completed and failed prediction instances. + */ + completionStats?: Schema$GoogleCloudAiplatformV1beta1CompletionStats; + /** + * Output only. Time when the BatchPredictionJob was created. */ createTime?: string | null; /** - * Required. The user-defined name of the AnnotationSpec. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The config of resources used by the Model during the batch prediction. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. */ - displayName?: string | null; + dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1BatchDedicatedResources; /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. */ - etag?: string | null; + disableContainerLogging?: boolean | null; /** - * Output only. Resource name of the AnnotationSpec. + * Required. The user-defined name of this BatchPredictionJob. */ - name?: string | null; + displayName?: string | null; /** - * Output only. Timestamp when AnnotationSpec was last updated. + * Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key. 
*/ - updateTime?: string | null; - } - /** - * Instance of a general artifact. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Artifact { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Output only. Timestamp when this Artifact was created. + * Output only. Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ - createTime?: string | null; + endTime?: string | null; /** - * Description of the Artifact + * Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. */ - description?: string | null; + error?: Schema$GoogleRpcStatus; /** - * User provided display name of the Artifact. May be up to 128 Unicode characters. + * Explanation configuration for this BatchPredictionJob. Can be specified only if generate_explanation is set to `true`. This value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of the explanation_spec object is not populated, the corresponding field of the Model.explanation_spec object is inherited. */ - displayName?: string | null; + explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; /** - * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Generate explanation with the batch prediction results. When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. 
If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated. */ - etag?: string | null; + generateExplanation?: boolean | null; /** - * The labels with user-defined metadata to organize your Artifacts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Artifact (System labels are excluded). + * Required. Input configuration of the instances on which predictions are performed. The schema of any single instance may be specified via the Model's PredictSchemata's instance_schema_uri. */ - labels?: {[key: string]: string} | null; + inputConfig?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInputConfig; /** - * Properties of the Artifact. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + * Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. */ - metadata?: {[key: string]: any} | null; + instanceConfig?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInstanceConfig; /** - * Output only. The resource name of the Artifact. + * The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - name?: string | null; + labels?: {[key: string]: string} | null; /** - * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Immutable. 
Parameters configuring the batch behavior. Currently only applicable when dedicated_resources are used (in other cases Vertex AI does the tuning itself). */ - schemaTitle?: string | null; + manualBatchTuningParameters?: Schema$GoogleCloudAiplatformV1beta1ManualBatchTuningParameters; /** - * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher\}/models/{model\}` or `projects/{project\}/locations/{location\}/publishers/{publisher\}/models/{model\}` */ - schemaVersion?: string | null; + model?: string | null; /** - * The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines), and the system does not prescribe or check the validity of state transitions. + * Model monitoring config will be used for analysis model behaviors, based on the input and output to the batch prediction job, as well as the provided training dataset. */ - state?: string | null; + modelMonitoringConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringConfig; /** - * Output only. Timestamp when this Artifact was last updated. 
+ * Get batch prediction job monitoring statistics. */ - updateTime?: string | null; + modelMonitoringStatsAnomalies?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomalies[]; /** - * The uniform resource identifier of the artifact file. May be empty if there is no actual artifact file. + * Output only. The running status of the model monitoring pipeline. */ - uri?: string | null; - } - /** - * Metadata information for NotebookService.AssignNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1beta1AssignNotebookRuntimeOperationMetadata { + modelMonitoringStatus?: Schema$GoogleRpcStatus; /** - * The operation generic information. + * The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + modelParameters?: any | null; /** - * A human-readable message that shows the intermediate progress details of NotebookRuntime. + * Output only. The version ID of the Model that produces the predictions via this job. */ - progressMessage?: string | null; - } - /** - * Request message for NotebookService.AssignNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1beta1AssignNotebookRuntimeRequest { + modelVersionId?: string | null; /** - * Required. Provide runtime specific information (e.g. runtime owner, notebook id) used for NotebookRuntime assignment. + * Output only. Resource name of the BatchPredictionJob. */ - notebookRuntime?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntime; + name?: string | null; /** - * Optional. User specified ID for the notebook runtime. + * Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of Model's PredictSchemata's instance_schema_uri and prediction_schema_uri. 
*/ - notebookRuntimeId?: string | null; + outputConfig?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputConfig; /** - * Required. The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be assigned (reuse or create a new one). + * Output only. Information further describing the output of this job. */ - notebookRuntimeTemplate?: string | null; - } - /** - * Attribution that explains a particular prediction output. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Attribution { + outputInfo?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputInfo; /** - * Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information. + * Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details. */ - approximationError?: number | null; + partialFailures?: Schema$GoogleRpcStatus[]; /** - * Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model's predicted output has multiple dimensions (rank \> 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged. + * Output only. Information about resources that had been consumed by this job. Provided in real time at best effort basis, as well as a final value once the job completes. 
Note: This field currently may be not populated for batch predictions that use AutoML Models. */ - baselineOutputValue?: number | null; + resourcesConsumed?: Schema$GoogleCloudAiplatformV1beta1ResourcesConsumed; /** - * Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature's input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated). + * The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. */ - featureAttributions?: any | null; + serviceAccount?: string | null; /** - * Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index. 
+ * Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state. */ - instanceOutputValue?: number | null; + startTime?: string | null; /** - * Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is only populated iff the Model predicts display names as a separate field along with the explained output. The predicted display name must has the same shape of the explained output, and can be located using output_index. + * Output only. The detailed state of the job. */ - outputDisplayName?: string | null; + state?: string | null; /** - * Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0. + * Contains model information necessary to perform batch prediction without requiring uploading to model registry. Exactly one of model and unmanaged_container_model must be set. */ - outputIndex?: number[] | null; + unmanagedContainerModel?: Schema$GoogleCloudAiplatformV1beta1UnmanagedContainerModel; /** - * Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs. + * Output only. Time when the BatchPredictionJob was most recently updated. */ - outputName?: string | null; + updateTime?: string | null; } /** - * Auth configuration to run the extension. + * Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1AuthConfig { + export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInputConfig { /** - * Config for API key auth. + * The BigQuery location of the input table. The schema of the table should be in the format described by the given context OpenAPI Schema, if one is provided. The table may contain additional columns that are not described by the schema, and they will be ignored. */ - apiKeyConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig; + bigquerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; /** - * Type of auth scheme. + * The Cloud Storage location for the input instances. */ - authType?: string | null; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; /** - * Config for Google Service Account auth. + * Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. */ - googleServiceAccountConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig; + instancesFormat?: string | null; + } + /** + * Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. + */ + export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInstanceConfig { /** - * Config for HTTP Basic auth. + * Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. */ - httpBasicAuthConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig; + excludedFields?: string[] | null; /** - * Config for user oauth. + * Fields that will be included in the prediction instance that is sent to the Model. 
If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. */ - oauthConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigOauthConfig; + includedFields?: string[] | null; /** - * Config for user OIDC auth. + * The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the file. 
*/ - oidcConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfigOidcConfig; + instanceType?: string | null; + /** + * The name of the field that is considered as a key. The values identified by the key field is not included in the transformed instances that is sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + */ + keyField?: string | null; } /** - * Config for authentication with API key. + * Configures the output of BatchPredictionJob. See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them. */ - export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigApiKeyConfig { + export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputConfig { /** - * Required. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project\}/secrets/{secrete\}/versions/{version\}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + * The BigQuery project or dataset location where the output is to be written to. If project is provided, a new dataset is created with name `prediction__` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. 
In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both instance and prediction schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has google.rpc.Status represented as a STRUCT, and containing only `code` and `message`. */ - apiKeySecret?: string | null; + bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; /** - * Required. The location of the API key. + * The Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.`, `predictions_0002.`, ..., `predictions_N.` are created where `` depends on chosen predictions_format, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both instance and prediction schemata defined then each such file contains predictions as per the predictions_format. If prediction for any instance failed (partially or completely), then an additional `errors_0001.`, `errors_0002.`,..., `errors_N.` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has google.rpc.Status containing only `code` and `message` fields. */ - httpElementLocation?: string | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; /** - * Required. The parameter name of the API key. E.g. 
If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name. + * Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats. */ - name?: string | null; + predictionsFormat?: string | null; } /** - * Config for Google Service Account Authentication. + * Further describes this job's output. Supplements output_config. */ - export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigGoogleServiceAccountConfig { + export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputInfo { /** - * Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension. + * Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written. */ - serviceAccount?: string | null; + bigqueryOutputDataset?: string | null; + /** + * Output only. The name of the BigQuery table created, in `predictions_` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example. + */ + bigqueryOutputTable?: string | null; + /** + * Output only. The full path of the Cloud Storage directory created, into which the prediction output is written. + */ + gcsOutputDirectory?: string | null; } /** - * Config for HTTP Basic Authentication. + * Details of operations that batch reads Feature values. */ - export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigHttpBasicAuthConfig { + export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesOperationMetadata { /** - * Required. 
The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project\}/secrets/{secrete\}/versions/{version\}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource. + * Operation metadata for Featurestore batch read Features values. */ - credentialSecret?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Config for user oauth. + * Request message for FeaturestoreService.BatchReadFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigOauthConfig { + export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequest { /** - * Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + * Similar to csv_read_instances, but from BigQuery source. */ - accessToken?: string | null; + bigqueryReadInstances?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; /** - * The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account. + * Each read instance consists of exactly one read timestamp and one or more entity IDs identifying entities of the corresponding EntityTypes whose Features are requested. Each output instance contains Feature values of requested entities concatenated together as of the read time. An example read instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z`. 
An example output instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each read instance must be millisecond-aligned. `csv_read_instances` are read instances stored in a plain-text CSV file. The header should be: [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in any order. Values in the timestamp column must use the RFC 3339 format, e.g. `2012-07-30T10:43:17.123Z`. */ - serviceAccount?: string | null; - } - /** - * Config for user OIDC auth. - */ - export interface Schema$GoogleCloudAiplatformV1beta1AuthConfigOidcConfig { + csvReadInstances?: Schema$GoogleCloudAiplatformV1beta1CsvSource; /** - * OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. + * Required. Specifies output location and format. */ - idToken?: string | null; + destination?: Schema$GoogleCloudAiplatformV1beta1FeatureValueDestination; /** - * The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + * Required. Specifies EntityType grouping Features to read values of and settings. */ - serviceAccount?: string | null; - } - /** - * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1AutomaticResources { + entityTypeSpecs?: Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestEntityTypeSpec[]; /** - * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. + * When not empty, the specified fields in the *_read_instances source will be joined as-is in the output, in addition to those fields from the Featurestore Entity. For BigQuery source, the type of the pass-through values will be automatically inferred. For CSV source, the pass-through values will be passed as opaque bytes. */ - maxReplicaCount?: number | null; + passThroughFields?: Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestPassThroughField[]; /** - * Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + * Optional. Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. 
*/ - minReplicaCount?: number | null; + startTime?: string | null; } /** - * The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so on) for calculating the desired replica count. + * Selects Features of an EntityType to read values of and specifies read settings. */ - export interface Schema$GoogleCloudAiplatformV1beta1AutoscalingMetricSpec { + export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestEntityTypeSpec { /** - * Required. The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` + * Required. ID of the EntityType to select Features. The EntityType id is the entity_type_id specified during EntityType creation. */ - metricName?: string | null; + entityTypeId?: string | null; /** - * The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + * Required. Selectors choosing which Feature values to read from the EntityType. */ - target?: number | null; - } - /** - * The storage details for Avro input content. - */ - export interface Schema$GoogleCloudAiplatformV1beta1AvroSource { + featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; /** - * Required. Google Cloud Storage location. + * Per-Feature settings for the batch read. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + settings?: Schema$GoogleCloudAiplatformV1beta1DestinationFeatureSetting[]; } /** - * Request message for PipelineService.BatchCancelPipelineJobs. + * Describe pass-through fields in read_instance source. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestPassThroughField { /** - * Required. The names of the PipelineJobs to cancel. A maximum of 32 PipelineJobs can be cancelled in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` + * Required. The name of the field in the CSV header or the name of the column in BigQuery table. The naming restriction is the same as Feature.name. */ - names?: string[] | null; + fieldName?: string | null; } /** - * Response message for PipelineService.BatchCancelPipelineJobs. + * Response message for FeaturestoreService.BatchReadFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCancelPipelineJobsResponse { - /** - * PipelineJobs cancelled. - */ - pipelineJobs?: Schema$GoogleCloudAiplatformV1beta1PipelineJob[]; - } + export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesResponse {} /** - * Details of operations that perform batch create Features. + * Response message for TensorboardService.BatchReadTensorboardTimeSeriesData. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateFeaturesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1BatchReadTensorboardTimeSeriesDataResponse { /** - * Operation metadata for Feature. + * The returned time series data. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + timeSeriesData?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesData[]; } /** - * Request message for FeaturestoreService.BatchCreateFeatures. + * The BigQuery location for the output content. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BigQueryDestination { /** - * Required. The request message specifying the Features to create. 
All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message. + * Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. */ - requests?: Schema$GoogleCloudAiplatformV1beta1CreateFeatureRequest[]; + outputUri?: string | null; } /** - * Response message for FeaturestoreService.BatchCreateFeatures. + * The BigQuery location for the input content. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateFeaturesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1BigQuerySource { /** - * The Features created. + * Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. */ - features?: Schema$GoogleCloudAiplatformV1beta1Feature[]; + inputUri?: string | null; } /** - * Request message for TensorboardService.BatchCreateTensorboardRuns. + * Input for bleu metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BleuInput { /** - * Required. The request message specifying the TensorboardRuns to create. A maximum of 1000 TensorboardRuns can be created in a batch. + * Required. Repeated bleu instances. */ - requests?: Schema$GoogleCloudAiplatformV1beta1CreateTensorboardRunRequest[]; + instances?: Schema$GoogleCloudAiplatformV1beta1BleuInstance[]; + /** + * Required. Spec for bleu score metric. 
+ */ + metricSpec?: Schema$GoogleCloudAiplatformV1beta1BleuSpec; } /** - * Response message for TensorboardService.BatchCreateTensorboardRuns. + * Spec for bleu instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardRunsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1BleuInstance { /** - * The created TensorboardRuns. + * Required. Output of the evaluated model. */ - tensorboardRuns?: Schema$GoogleCloudAiplatformV1beta1TensorboardRun[]; + prediction?: string | null; + /** + * Required. Ground truth used to compare against the prediction. + */ + reference?: string | null; } /** - * Request message for TensorboardService.BatchCreateTensorboardTimeSeries. + * Bleu metric value for an instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BleuMetricValue { /** - * Required. The request message specifying the TensorboardTimeSeries to create. A maximum of 1000 TensorboardTimeSeries can be created in a batch. + * Output only. Bleu score. */ - requests?: Schema$GoogleCloudAiplatformV1beta1CreateTensorboardTimeSeriesRequest[]; + score?: number | null; } /** - * Response message for TensorboardService.BatchCreateTensorboardTimeSeries. + * Results for bleu metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchCreateTensorboardTimeSeriesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1BleuResults { /** - * The created TensorboardTimeSeries. + * Output only. Bleu metric values. */ - tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries[]; + bleuMetricValues?: Schema$GoogleCloudAiplatformV1beta1BleuMetricValue[]; } /** - * A description of resources that are used for performing batch operations, are dedicated to a Model, and need manual configuration. 
+ * Spec for bleu score metric - calculates the precision of n-grams in the prediction as compared to reference - returns a score ranging between 0 to 1. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchDedicatedResources { - /** - * Required. Immutable. The specification of a single machine. - */ - machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + export interface Schema$GoogleCloudAiplatformV1beta1BleuSpec {} + /** + * Content blob. It's preferred to send as text directly rather than raw bytes. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Blob { /** - * Immutable. The maximum number of machine replicas the batch operation may be scaled to. The default value is 10. + * Required. Raw bytes. */ - maxReplicaCount?: number | null; + data?: string | null; /** - * Immutable. The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than max_replica_count + * Required. The IANA standard MIME type of the source data. */ - startingReplicaCount?: number | null; + mimeType?: string | null; } /** - * Request message for PipelineService.BatchDeletePipelineJobs. + * Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1BlurBaselineConfig { /** - * Required. The names of the PipelineJobs to delete. A maximum of 32 PipelineJobs can be deleted in a batch. Format: `projects/{project\}/locations/{location\}/pipelineJobs/{pipelineJob\}` + * The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. 
If not set, the method defaults to the zero (i.e. black for images) baseline. */ - names?: string[] | null; + maxBlurSigma?: number | null; } /** - * Response message for PipelineService.BatchDeletePipelineJobs. + * A list of boolean values. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchDeletePipelineJobsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1BoolArray { /** - * PipelineJobs deleted. + * A list of bool values. */ - pipelineJobs?: Schema$GoogleCloudAiplatformV1beta1PipelineJob[]; + values?: boolean[] | null; } /** - * Request message for ModelService.BatchImportEvaluatedAnnotations + * Config of GenAI caching features. This is a singleton resource. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchImportEvaluatedAnnotationsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1CacheConfig { /** - * Required. Evaluated annotations resource to be imported. + * If set to true, disables GenAI caching. Otherwise caching is enabled. */ - evaluatedAnnotations?: Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotation[]; - } - /** - * Response message for ModelService.BatchImportEvaluatedAnnotations - */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchImportEvaluatedAnnotationsResponse { + disableCache?: boolean | null; /** - * Output only. Number of EvaluatedAnnotations imported. + * Identifier. Name of the cache config. Format: - `projects/{project\}/cacheConfig`. */ - importedEvaluatedAnnotationsCount?: number | null; + name?: string | null; } /** - * Request message for ModelService.BatchImportModelEvaluationSlices + * Request message for JobService.CancelBatchPredictionJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchImportModelEvaluationSlicesRequest { - /** - * Required. Model evaluation slice resource to be imported. 
- */ - modelEvaluationSlices?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSlice[]; - } + export interface Schema$GoogleCloudAiplatformV1beta1CancelBatchPredictionJobRequest {} /** - * Response message for ModelService.BatchImportModelEvaluationSlices + * Request message for JobService.CancelCustomJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchImportModelEvaluationSlicesResponse { - /** - * Output only. List of imported ModelEvaluationSlice.name. - */ - importedModelEvaluationSlices?: string[] | null; - } + export interface Schema$GoogleCloudAiplatformV1beta1CancelCustomJobRequest {} /** - * Runtime operation information for MigrationService.BatchMigrateResources. + * Request message for JobService.CancelDataLabelingJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1CancelDataLabelingJobRequest {} + /** + * Request message for JobService.CancelHyperparameterTuningJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CancelHyperparameterTuningJobRequest {} + /** + * Request message for JobService.CancelNasJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CancelNasJobRequest {} + /** + * Request message for PipelineService.CancelPipelineJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CancelPipelineJobRequest {} + /** + * Request message for PipelineService.CancelTrainingPipeline. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CancelTrainingPipelineRequest {} + /** + * Request message for GenAiTuningService.CancelTuningJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CancelTuningJobRequest {} + /** + * A response candidate generated from the model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Candidate { /** - * The common part of the operation metadata. + * Output only. Source attribution of the generated content. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + citationMetadata?: Schema$GoogleCloudAiplatformV1beta1CitationMetadata; /** - * Partial results that reflect the latest migration operation progress. + * Output only. Content parts of the candidate. */ - partialResults?: Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesOperationMetadataPartialResult[]; - } - /** - * Represents a partial result in batch migration operation for one MigrateResourceRequest. - */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesOperationMetadataPartialResult { + content?: Schema$GoogleCloudAiplatformV1beta1Content; /** - * Migrated dataset resource name. + * Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. */ - dataset?: string | null; + finishMessage?: string | null; /** - * The error result of the migration request in case of failure. + * Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */ - error?: Schema$GoogleRpcStatus; + finishReason?: string | null; /** - * Migrated model resource name. + * Output only. Metadata specifies sources used to ground generated content. */ - model?: string | null; + groundingMetadata?: Schema$GoogleCloudAiplatformV1beta1GroundingMetadata; /** - * It's the same as the value in MigrateResourceRequest.migrate_resource_requests. + * Output only. Index of the candidate. */ - request?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequest; - } - /** - * Request message for MigrationService.BatchMigrateResources. - */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesRequest { + index?: number | null; /** - * Required. The request messages specifying the resources to migrate. They must be in the same location as the destination. Up to 50 resources can be migrated in one batch. + * Output only. 
List of ratings for the safety of a response candidate. There is at most one rating per category. */ - migrateResourceRequests?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequest[]; + safetyRatings?: Schema$GoogleCloudAiplatformV1beta1SafetyRating[]; } /** - * Response message for MigrationService.BatchMigrateResources. + * This message will be placed in the metadata field of a google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchMigrateResourcesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateMetatdata { /** - * Successfully migrated resources. + * Operation metadata for suggesting Trials. */ - migrateResourceResponses?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceResponse[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + /** + * The name of the Study that the Trial belongs to. + */ + study?: string | null; + /** + * The Trial name. + */ + trial?: string | null; } /** - * A job that uses a Model to produce predictions on multiple input instances. If predictions for significant portion of the instances fail, the job may finish without attempting predictions for all remaining instances. + * Request message for VizierService.CheckTrialEarlyStoppingState. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJob { - /** - * Output only. Statistics on completed and failed prediction instances. - */ - completionStats?: Schema$GoogleCloudAiplatformV1beta1CompletionStats; + export interface Schema$GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateRequest {} + /** + * Response message for VizierService.CheckTrialEarlyStoppingState. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateResponse { /** - * Output only. Time when the BatchPredictionJob was created. + * True if the Trial should stop. 
*/ - createTime?: string | null; + shouldStop?: boolean | null; + } + /** + * Source attributions for content. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Citation { /** - * The config of resources used by the Model during the batch prediction. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. + * Output only. End index into the content. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1BatchDedicatedResources; + endIndex?: number | null; /** - * For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + * Output only. License of the attribution. */ - disableContainerLogging?: boolean | null; + license?: string | null; /** - * Required. The user-defined name of this BatchPredictionJob. + * Output only. Publication date of the attribution. */ - displayName?: string | null; + publicationDate?: Schema$GoogleTypeDate; /** - * Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key. + * Output only. Start index into the content. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + startIndex?: number | null; /** - * Output only. Time when the BatchPredictionJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + * Output only. Title of the attribution. */ - endTime?: string | null; + title?: string | null; /** - * Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. 
+ * Output only. Url reference of the attribution. */ - error?: Schema$GoogleRpcStatus; + uri?: string | null; + } + /** + * A collection of source attributions for a piece of content. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CitationMetadata { /** - * Explanation configuration for this BatchPredictionJob. Can be specified only if generate_explanation is set to `true`. This value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of the explanation_spec object is not populated, the corresponding field of the Model.explanation_spec object is inherited. + * Output only. List of citations. */ - explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; + citations?: Schema$GoogleCloudAiplatformV1beta1Citation[]; + } + /** + * Input for coherence metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CoherenceInput { /** - * Generate explanation with the batch prediction results. When set to `true`, the batch prediction output changes based on the `predictions_format` field of the BatchPredictionJob.output_config object: * `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the Explanation object. * `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the Explanation object. * `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_spec must be populated. + * Required. Coherence instance. */ - generateExplanation?: boolean | null; + instance?: Schema$GoogleCloudAiplatformV1beta1CoherenceInstance; /** - * Required. Input configuration of the instances on which predictions are performed. The schema of any single instance may be specified via the Model's PredictSchemata's instance_schema_uri. + * Required. Spec for coherence score metric. 
*/ - inputConfig?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInputConfig; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1CoherenceSpec; + } + /** + * Spec for coherence instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CoherenceInstance { /** - * Configuration for how to convert batch prediction input instances to the prediction instances that are sent to the Model. + * Required. Output of the evaluated model. */ - instanceConfig?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInstanceConfig; + prediction?: string | null; + } + /** + * Spec for coherence result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CoherenceResult { /** - * The labels with user-defined metadata to organize BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Output only. Confidence for coherence score. */ - labels?: {[key: string]: string} | null; + confidence?: number | null; /** - * Immutable. Parameters configuring the batch behavior. Currently only applicable when dedicated_resources are used (in other cases Vertex AI does the tuning itself). + * Output only. Explanation for coherence score. */ - manualBatchTuningParameters?: Schema$GoogleCloudAiplatformV1beta1ManualBatchTuningParameters; + explanation?: string | null; /** - * The name of the Model resource that produces the predictions via this job, must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Exactly one of model and unmanaged_container_model must be set. The model resource name may contain version id or version alias to specify the version. 
Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. The model resource could also be a publisher model. Example: `publishers/{publisher\}/models/{model\}` or `projects/{project\}/locations/{location\}/publishers/{publisher\}/models/{model\}` + * Output only. Coherence score. */ - model?: string | null; + score?: number | null; + } + /** + * Spec for coherence score metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CoherenceSpec { /** - * Model monitoring config will be used for analysis model behaviors, based on the input and output to the batch prediction job, as well as the provided training dataset. + * Optional. Which version to use for evaluation. */ - modelMonitoringConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringConfig; + version?: number | null; + } + /** + * Request message for VizierService.CompleteTrial. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CompleteTrialRequest { /** - * Get batch prediction job monitoring statistics. + * Optional. If provided, it will be used as the completed Trial's final_measurement; Otherwise, the service will auto-select a previously reported measurement as the final-measurement */ - modelMonitoringStatsAnomalies?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomalies[]; + finalMeasurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; /** - * Output only. The running status of the model monitoring pipeline. + * Optional. A human readable reason why the trial was infeasible. This should only be provided if `trial_infeasible` is true. */ - modelMonitoringStatus?: Schema$GoogleRpcStatus; + infeasibleReason?: string | null; /** - * The parameters that govern the predictions. The schema of the parameters may be specified via the Model's PredictSchemata's parameters_schema_uri. + * Optional. 
True if the Trial cannot be run with the given Parameter, and final_measurement will be ignored. */ - modelParameters?: any | null; + trialInfeasible?: boolean | null; + } + /** + * Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CompletionStats { /** - * Output only. The version ID of the Model that produces the predictions via this job. + * Output only. The number of entities for which any error was encountered. */ - modelVersionId?: string | null; + failedCount?: string | null; /** - * Output only. Resource name of the BatchPredictionJob. + * Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected). */ - name?: string | null; + incompleteCount?: string | null; /** - * Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of Model's PredictSchemata's instance_schema_uri and prediction_schema_uri. + * Output only. The number of entities that had been processed successfully. */ - outputConfig?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputConfig; + successfulCount?: string | null; /** - * Output only. Information further describing the output of this job. + * Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction. */ - outputInfo?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputInfo; + successfulForecastPointCount?: string | null; + } + /** + * Request message for ComputeTokens RPC call. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1ComputeTokensRequest { /** - * Output only. Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard Google Cloud error details. + * Required. The instances that are the input to token computing API call. Schema is identical to the prediction schema of the text model, even for the non-text models, like chat models, or Codey models. */ - partialFailures?: Schema$GoogleRpcStatus[]; + instances?: any[] | null; + } + /** + * Response message for ComputeTokens RPC call. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ComputeTokensResponse { /** - * Output only. Information about resources that had been consumed by this job. Provided in real time at best effort basis, as well as a final value once the job completes. Note: This field currently may be not populated for batch predictions that use AutoML Models. + * Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances. */ - resourcesConsumed?: Schema$GoogleCloudAiplatformV1beta1ResourcesConsumed; + tokensInfo?: Schema$GoogleCloudAiplatformV1beta1TokensInfo[]; + } + /** + * The Container Registry location for the container image. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ContainerRegistryDestination { /** - * The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + * Required. Container Registry URI of a container image. Only Google Container Registry and Artifact Registry are supported now. 
Accepted forms: * Google Container Registry path. For example: `gcr.io/projectId/imageName:tag`. * Artifact Registry path. For example: `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. If a tag is not specified, "latest" will be used as the default tag. */ - serviceAccount?: string | null; + outputUri?: string | null; + } + /** + * The spec of a Container. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ContainerSpec { /** - * Output only. Time when the BatchPredictionJob for the first time entered the `JOB_STATE_RUNNING` state. + * The arguments to be passed when starting the container. */ - startTime?: string | null; + args?: string[] | null; /** - * Output only. The detailed state of the job. + * The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. */ - state?: string | null; + command?: string[] | null; /** - * Contains model information necessary to perform batch prediction without requiring uploading to model registry. Exactly one of model and unmanaged_container_model must be set. + * Environment variables to be passed to the container. Maximum limit is 100. */ - unmanagedContainerModel?: Schema$GoogleCloudAiplatformV1beta1UnmanagedContainerModel; + env?: Schema$GoogleCloudAiplatformV1beta1EnvVar[]; /** - * Output only. Time when the BatchPredictionJob was most recently updated. + * Required. The URI of a container image in the Container Registry that is to be run on each worker replica. */ - updateTime?: string | null; + imageUri?: string | null; } /** - * Configures the input to BatchPredictionJob. See Model.supported_input_storage_formats for Model's supported input formats, and how instances should be expressed via any of them. + * The base structured datatype containing multi-part content of a message. 
A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInputConfig { - /** - * The BigQuery location of the input table. The schema of the table should be in the format described by the given context OpenAPI Schema, if one is provided. The table may contain additional columns that are not described by the schema, and they will be ignored. - */ - bigquerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; + export interface Schema$GoogleCloudAiplatformV1beta1Content { /** - * The Cloud Storage location for the input instances. + * Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + parts?: Schema$GoogleCloudAiplatformV1beta1Part[]; /** - * Required. The format in which instances are given, must be one of the Model's supported_input_storage_formats. + * Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. */ - instancesFormat?: string | null; + role?: string | null; } /** - * Configuration defining how to transform batch prediction input instances to the instances that the Model accepts. + * Instance of a general context. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobInstanceConfig { + export interface Schema$GoogleCloudAiplatformV1beta1Context { /** - * Fields that will be excluded in the prediction instance that is sent to the Model. Excluded will be attached to the batch prediction output if key_field is not specified. When excluded_fields is populated, included_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. + * Output only. Timestamp when this Context was created. 
*/ - excludedFields?: string[] | null; + createTime?: string | null; /** - * Fields that will be included in the prediction instance that is sent to the Model. If instance_type is `array`, the order of field names in included_fields also determines the order of the values in the array. When included_fields is populated, excluded_fields must be empty. The input must be JSONL with objects at each line, BigQuery or TfRecord. + * Description of the Context */ - includedFields?: string[] | null; + description?: string | null; /** - * The format of the instance that the Model accepts. Vertex AI will convert compatible batch prediction input instance formats to the specified format. Supported values are: * `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless included_fields is populated. included_fields must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, included_fields must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the record. 
* For `file-list`, each file in the list will be converted to an object in the format of `{"b64": \}`, where `` is the Base64-encoded string of the content of the file. + * User provided display name of the Context. May be up to 128 Unicode characters. */ - instanceType?: string | null; + displayName?: string | null; /** - * The name of the field that is considered as a key. The values identified by the key field is not included in the transformed instances that is sent to the Model. This is similar to specifying this name of the field in excluded_fields. In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. + * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - keyField?: string | null; - } - /** - * Configures the output of BatchPredictionJob. See Model.supported_output_storage_formats for supported output formats, and how predictions are expressed via any of them. - */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputConfig { + etag?: string | null; /** - * The BigQuery project or dataset location where the output is to be written to. If project is provided, a new dataset is created with name `prediction__` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. 
If the Model has both instance and prediction schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has google.rpc.Status represented as a STRUCT, and containing only `code` and `message`. + * The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). */ - bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; + labels?: {[key: string]: string} | null; /** - * The Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.`, `predictions_0002.`, ..., `predictions_N.` are created where `` depends on chosen predictions_format, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both instance and prediction schemata defined then each such file contains predictions as per the predictions_format. If prediction for any instance failed (partially or completely), then an additional `errors_0001.`, `errors_0002.`,..., `errors_N.` files are created (N depends on total number of failed predictions). 
These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has google.rpc.Status containing only `code` and `message` fields. + * Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + metadata?: {[key: string]: any} | null; /** - * Required. The format in which Vertex AI gives the predictions, must be one of the Model's supported_output_storage_formats. + * Immutable. The resource name of the Context. */ - predictionsFormat?: string | null; - } - /** - * Further describes this job's output. Supplements output_config. - */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchPredictionJobOutputInfo { + name?: string | null; /** - * Output only. The path of the BigQuery dataset created, in `bq://projectId.bqDatasetId` format, into which the prediction output is written. + * Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. */ - bigqueryOutputDataset?: string | null; + parentContexts?: string[] | null; /** - * Output only. The name of the BigQuery table created, in `predictions_` format, into which the prediction output is written. Can be used by UI to generate the BigQuery output path, for example. + * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - bigqueryOutputTable?: string | null; + schemaTitle?: string | null; /** - * Output only. The full path of the Cloud Storage directory created, into which the prediction output is written. + * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. 
And both are used together as unique identifiers to identify schemas within the local metadata store. */ - gcsOutputDirectory?: string | null; + schemaVersion?: string | null; + /** + * Output only. Timestamp when this Context was last updated. + */ + updateTime?: string | null; } /** - * Details of operations that batch reads Feature values. + * Details of ModelService.CopyModel operation. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1CopyModelOperationMetadata { /** - * Operation metadata for Featurestore batch read Features values. + * The common part of the operation metadata. */ genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Request message for FeaturestoreService.BatchReadFeatureValues. + * Request message for ModelService.CopyModel. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1CopyModelRequest { /** - * Similar to csv_read_instances, but from BigQuery source. + * Customer-managed encryption key options. If this is set, then the Model copy will be encrypted with the provided encryption key. */ - bigqueryReadInstances?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Each read instance consists of exactly one read timestamp and one or more entity IDs identifying entities of the corresponding EntityTypes whose Features are requested. Each output instance contains Feature values of requested entities concatenated together as of the read time. An example read instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z`. An example output instance may be `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each read instance must be millisecond-aligned. 
`csv_read_instances` are read instances stored in a plain-text CSV file. The header should be: [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in any order. Values in the timestamp column must use the RFC 3339 format, e.g. `2012-07-30T10:43:17.123Z`. + * Optional. Copy source_model into a new Model with this ID. The ID will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. */ - csvReadInstances?: Schema$GoogleCloudAiplatformV1beta1CsvSource; + modelId?: string | null; /** - * Required. Specifies output location and format. + * Optional. Specify this field to copy source_model into this existing Model as a new version. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - destination?: Schema$GoogleCloudAiplatformV1beta1FeatureValueDestination; + parentModel?: string | null; /** - * Required. Specifies EntityType grouping Features to read values of and settings. + * Required. The resource name of the Model to copy. That Model must be in the same Project. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - entityTypeSpecs?: Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestEntityTypeSpec[]; + sourceModel?: string | null; + } + /** + * Response message of ModelService.CopyModel operation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CopyModelResponse { /** - * When not empty, the specified fields in the *_read_instances source will be joined as-is in the output, in addition to those fields from the Featurestore Entity. For BigQuery source, the type of the pass-through values will be automatically inferred. For CSV source, the pass-through values will be passed as opaque bytes. + * The name of the copied Model resource. 
Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - passThroughFields?: Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestPassThroughField[]; + model?: string | null; /** - * Optional. Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. + * Output only. The version ID of the model that is copied. */ - startTime?: string | null; + modelVersionId?: string | null; } /** - * Selects Features of an EntityType to read values of and specifies read settings. + * Request message for PredictionService.CountTokens. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestEntityTypeSpec { + export interface Schema$GoogleCloudAiplatformV1beta1CountTokensRequest { /** - * Required. ID of the EntityType to select Features. The EntityType id is the entity_type_id specified during EntityType creation. + * Required. Input content. */ - entityTypeId?: string | null; + contents?: Schema$GoogleCloudAiplatformV1beta1Content[]; /** - * Required. Selectors choosing which Feature values to read from the EntityType. + * Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. */ - featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; + instances?: any[] | null; /** - * Per-Feature settings for the batch read. + * Required. The name of the publisher model requested to serve the prediction. Format: `projects/{project\}/locations/{location\}/publishers/x/models/x` */ - settings?: Schema$GoogleCloudAiplatformV1beta1DestinationFeatureSetting[]; + model?: string | null; } /** - * Describe pass-through fields in read_instance source. + * Response message for PredictionService.CountTokens. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesRequestPassThroughField { + export interface Schema$GoogleCloudAiplatformV1beta1CountTokensResponse { /** - * Required. The name of the field in the CSV header or the name of the column in BigQuery table. The naming restriction is the same as Feature.name. + * The total number of billable characters counted across all instances from the request. */ - fieldName?: string | null; + totalBillableCharacters?: number | null; + /** + * The total number of tokens counted across all instances from the request. + */ + totalTokens?: number | null; } /** - * Response message for FeaturestoreService.BatchReadFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchReadFeatureValuesResponse {} - /** - * Response message for TensorboardService.BatchReadTensorboardTimeSeriesData. + * Runtime operation information for DatasetService.CreateDataset. */ - export interface Schema$GoogleCloudAiplatformV1beta1BatchReadTensorboardTimeSeriesDataResponse { + export interface Schema$GoogleCloudAiplatformV1beta1CreateDatasetOperationMetadata { /** - * The returned time series data. + * The operation generic information. */ - timeSeriesData?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesData[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * The BigQuery location for the output content. + * Runtime operation information for DatasetService.CreateDatasetVersion. */ - export interface Schema$GoogleCloudAiplatformV1beta1BigQueryDestination { + export interface Schema$GoogleCloudAiplatformV1beta1CreateDatasetVersionOperationMetadata { /** - * Required. BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: * BigQuery path. 
For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. + * The common part of the operation metadata. */ - outputUri?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * The BigQuery location for the input content. + * Runtime operation information for CreateDeploymentResourcePool method. */ - export interface Schema$GoogleCloudAiplatformV1beta1BigQuerySource { + export interface Schema$GoogleCloudAiplatformV1beta1CreateDeploymentResourcePoolOperationMetadata { /** - * Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. + * The operation generic information. */ - inputUri?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Input for bleu metric. + * Request message for CreateDeploymentResourcePool method. */ - export interface Schema$GoogleCloudAiplatformV1beta1BleuInput { + export interface Schema$GoogleCloudAiplatformV1beta1CreateDeploymentResourcePoolRequest { /** - * Required. Repeated bleu instances. + * Required. The DeploymentResourcePool to create. */ - instances?: Schema$GoogleCloudAiplatformV1beta1BleuInstance[]; + deploymentResourcePool?: Schema$GoogleCloudAiplatformV1beta1DeploymentResourcePool; /** - * Required. Spec for bleu score metric. + * Required. The ID to use for the DeploymentResourcePool, which will become the final component of the DeploymentResourcePool's resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61\}[a-z0-9])?$/`. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1BleuSpec; + deploymentResourcePoolId?: string | null; } /** - * Spec for bleu instance. + * Runtime operation information for EndpointService.CreateEndpoint. */ - export interface Schema$GoogleCloudAiplatformV1beta1BleuInstance { - /** - * Required. 
Output of the evaluated model. - */ - prediction?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1CreateEndpointOperationMetadata { /** - * Required. Ground truth used to compare against the prediction. + * The operation generic information. */ - reference?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Bleu metric value for an instance. + * Details of operations that perform create EntityType. */ - export interface Schema$GoogleCloudAiplatformV1beta1BleuMetricValue { + export interface Schema$GoogleCloudAiplatformV1beta1CreateEntityTypeOperationMetadata { /** - * Output only. Bleu score. + * Operation metadata for EntityType. */ - score?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Results for bleu metric. + * Details of ExtensionControllerService.CreateExtensionController operation. */ - export interface Schema$GoogleCloudAiplatformV1beta1BleuResults { + export interface Schema$GoogleCloudAiplatformV1beta1CreateExtensionControllerOperationMetadata { /** - * Output only. Bleu metric values. + * The common part of the operation metadata. */ - bleuMetricValues?: Schema$GoogleCloudAiplatformV1beta1BleuMetricValue[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Spec for bleu score metric - calculates the precision of n-grams in the prediction as compared to reference - returns a score ranging between 0 to 1. - */ - export interface Schema$GoogleCloudAiplatformV1beta1BleuSpec {} - /** - * Content blob. It's preferred to send as text directly rather than raw bytes. + * Details of operations that perform create FeatureGroup. */ - export interface Schema$GoogleCloudAiplatformV1beta1Blob { - /** - * Required. Raw bytes. - */ - data?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureGroupOperationMetadata { /** - * Required. 
The IANA standard MIME type of the source data. + * Operation metadata for FeatureGroup. */ - mimeType?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + * Details of operations that perform create FeatureOnlineStore. */ - export interface Schema$GoogleCloudAiplatformV1beta1BlurBaselineConfig { + export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureOnlineStoreOperationMetadata { /** - * The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline. + * Operation metadata for FeatureOnlineStore. */ - maxBlurSigma?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * A list of boolean values. + * Details of operations that perform create Feature. */ - export interface Schema$GoogleCloudAiplatformV1beta1BoolArray { + export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureOperationMetadata { /** - * A list of bool values. + * Operation metadata for Feature. */ - values?: boolean[] | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Config of GenAI caching features. This is a singleton resource. + * Request message for FeaturestoreService.CreateFeature. Request message for FeatureRegistryService.CreateFeature. */ - export interface Schema$GoogleCloudAiplatformV1beta1CacheConfig { + export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureRequest { /** - * If set to true, disables GenAI caching. Otherwise caching is enabled. + * Required. The Feature to create. 
*/ - disableCache?: boolean | null; + feature?: Schema$GoogleCloudAiplatformV1beta1Feature; /** - * Identifier. Name of the cache config. Format: - `projects/{project\}/cacheConfig`. + * Required. The ID to use for the Feature, which will become the final component of the Feature's resource name. This value may be up to 128 characters, and valid characters are `[a-z0-9_]`. The first character cannot be a number. The value must be unique within an EntityType/FeatureGroup. */ - name?: string | null; + featureId?: string | null; + /** + * Required. The resource name of the EntityType or FeatureGroup to create a Feature. Format for entity_type as parent: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` Format for feature_group as parent: `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}` + */ + parent?: string | null; } /** - * Request message for JobService.CancelBatchPredictionJob. + * Details of operations that perform create Featurestore. */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelBatchPredictionJobRequest {} + export interface Schema$GoogleCloudAiplatformV1beta1CreateFeaturestoreOperationMetadata { + /** + * Operation metadata for Featurestore. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } /** - * Request message for JobService.CancelCustomJob. + * Details of operations that perform create FeatureView. */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelCustomJobRequest {} + export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureViewOperationMetadata { + /** + * Operation metadata for FeatureView Create. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } /** - * Request message for JobService.CancelDataLabelingJob. + * Runtime operation information for IndexEndpointService.CreateIndexEndpoint. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1CancelDataLabelingJobRequest {} + export interface Schema$GoogleCloudAiplatformV1beta1CreateIndexEndpointOperationMetadata { + /** + * The operation generic information. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } /** - * Request message for JobService.CancelHyperparameterTuningJob. + * Runtime operation information for IndexService.CreateIndex. */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelHyperparameterTuningJobRequest {} + export interface Schema$GoogleCloudAiplatformV1beta1CreateIndexOperationMetadata { + /** + * The operation generic information. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + /** + * The operation metadata with regard to Matching Engine Index operation. + */ + nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata; + } /** - * Request message for JobService.CancelNasJob. + * Details of operations that perform MetadataService.CreateMetadataStore. */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelNasJobRequest {} - /** - * Request message for PipelineService.CancelPipelineJob. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelPipelineJobRequest {} - /** - * Request message for PipelineService.CancelTrainingPipeline. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelTrainingPipelineRequest {} - /** - * Request message for GenAiTuningService.CancelTuningJob. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CancelTuningJobRequest {} - /** - * A response candidate generated from the model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Candidate { - /** - * Output only. Source attribution of the generated content. - */ - citationMetadata?: Schema$GoogleCloudAiplatformV1beta1CitationMetadata; - /** - * Output only. Content parts of the candidate. 
- */ - content?: Schema$GoogleCloudAiplatformV1beta1Content; - /** - * Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. - */ - finishMessage?: string | null; - /** - * Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. - */ - finishReason?: string | null; - /** - * Output only. Metadata specifies sources used to ground generated content. - */ - groundingMetadata?: Schema$GoogleCloudAiplatformV1beta1GroundingMetadata; - /** - * Output only. Index of the candidate. - */ - index?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1CreateMetadataStoreOperationMetadata { /** - * Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. + * Operation metadata for creating a MetadataStore. */ - safetyRatings?: Schema$GoogleCloudAiplatformV1beta1SafetyRating[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * This message will be placed in the metadata field of a google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request. + * Request message for ModelMonitoringService.CreateModelMonitoringJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateMetatdata { + export interface Schema$GoogleCloudAiplatformV1beta1CreateModelMonitoringJobRequest { /** - * Operation metadata for suggesting Trials. + * Required. The ModelMonitoringJob to create */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + modelMonitoringJob?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJob; /** - * The name of the Study that the Trial belongs to. + * Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. 
The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61\}[a-z0-9])?$/`. */ - study?: string | null; + modelMonitoringJobId?: string | null; /** - * The Trial name. + * Required. The parent of the ModelMonitoringJob. Format: `projects/{project\}/locations/{location\}/modelMoniitors/{model_monitor\}` */ - trial?: string | null; + parent?: string | null; } /** - * Request message for VizierService.CheckTrialEarlyStoppingState. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateRequest {} - /** - * Response message for VizierService.CheckTrialEarlyStoppingState. + * Runtime operation information for ModelMonitoringService.CreateModelMonitor. */ - export interface Schema$GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateResponse { + export interface Schema$GoogleCloudAiplatformV1beta1CreateModelMonitorOperationMetadata { /** - * True if the Trial should stop. + * The operation generic information. */ - shouldStop?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Source attributions for content. + * Request message for [NotebookService.CreateNotebookExecutionJob] */ - export interface Schema$GoogleCloudAiplatformV1beta1Citation { + export interface Schema$GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest { /** - * Output only. End index into the content. + * Required. The NotebookExecutionJob to create. */ - endIndex?: number | null; + notebookExecutionJob?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJob; /** - * Output only. License of the attribution. + * Optional. User specified ID for the NotebookExecutionJob. */ - license?: string | null; + notebookExecutionJobId?: string | null; /** - * Output only. Publication date of the attribution. + * Required. The resource name of the Location to create the NotebookExecutionJob. 
Format: `projects/{project\}/locations/{location\}` */ - publicationDate?: Schema$GoogleTypeDate; + parent?: string | null; + } + /** + * Metadata information for NotebookService.CreateNotebookRuntimeTemplate. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CreateNotebookRuntimeTemplateOperationMetadata { /** - * Output only. Start index into the content. + * The operation generic information. */ - startIndex?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Details of operations that perform create PersistentResource. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CreatePersistentResourceOperationMetadata { /** - * Output only. Title of the attribution. + * Operation metadata for PersistentResource. */ - title?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Output only. Url reference of the attribution. + * Progress Message for Create LRO */ - uri?: string | null; + progressMessage?: string | null; } /** - * A collection of source attributions for a piece of content. + * Request message for PipelineService.CreatePipelineJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1CitationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1CreatePipelineJobRequest { /** - * Output only. List of citations. + * Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project\}/locations/{location\}` */ - citations?: Schema$GoogleCloudAiplatformV1beta1Citation[]; - } - /** - * Input for coherence metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CoherenceInput { + parent?: string | null; /** - * Required. Coherence instance. + * Required. The PipelineJob to create. */ - instance?: Schema$GoogleCloudAiplatformV1beta1CoherenceInstance; + pipelineJob?: Schema$GoogleCloudAiplatformV1beta1PipelineJob; /** - * Required. Spec for coherence score metric. 
+ * The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are `/a-z-/`. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1CoherenceSpec; + pipelineJobId?: string | null; } /** - * Spec for coherence instance. + * Details of operations that perform create FeatureGroup. */ - export interface Schema$GoogleCloudAiplatformV1beta1CoherenceInstance { + export interface Schema$GoogleCloudAiplatformV1beta1CreateRegistryFeatureOperationMetadata { /** - * Required. Output of the evaluated model. + * Operation metadata for Feature. */ - prediction?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Spec for coherence result. + * Runtime operation information for SolverService.CreateSolver. */ - export interface Schema$GoogleCloudAiplatformV1beta1CoherenceResult { - /** - * Output only. Confidence for coherence score. - */ - confidence?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1CreateSolverOperationMetadata { /** - * Output only. Explanation for coherence score. + * The generic operation information. */ - explanation?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Runtime operation information for SpecialistPoolService.CreateSpecialistPool. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CreateSpecialistPoolOperationMetadata { /** - * Output only. Coherence score. + * The operation generic information. */ - score?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Spec for coherence score metric. + * Details of operations that perform create Tensorboard. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1CoherenceSpec { + export interface Schema$GoogleCloudAiplatformV1beta1CreateTensorboardOperationMetadata { /** - * Optional. Which version to use for evaluation. + * Operation metadata for Tensorboard. */ - version?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Request message for VizierService.CompleteTrial. + * Request message for TensorboardService.CreateTensorboardRun. */ - export interface Schema$GoogleCloudAiplatformV1beta1CompleteTrialRequest { + export interface Schema$GoogleCloudAiplatformV1beta1CreateTensorboardRunRequest { /** - * Optional. If provided, it will be used as the completed Trial's final_measurement; Otherwise, the service will auto-select a previously reported measurement as the final-measurement + * Required. The resource name of the TensorboardExperiment to create the TensorboardRun in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` */ - finalMeasurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; + parent?: string | null; /** - * Optional. A human readable reason why the trial was infeasible. This should only be provided if `trial_infeasible` is true. + * Required. The TensorboardRun to create. */ - infeasibleReason?: string | null; + tensorboardRun?: Schema$GoogleCloudAiplatformV1beta1TensorboardRun; /** - * Optional. True if the Trial cannot be run with the given Parameter, and final_measurement will be ignored. + * Required. The ID to use for the Tensorboard run, which becomes the final component of the Tensorboard run's resource name. This value should be 1-128 characters, and valid characters are `/a-z-/`. */ - trialInfeasible?: boolean | null; + tensorboardRunId?: string | null; } /** - * Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch. 
+ * Request message for TensorboardService.CreateTensorboardTimeSeries. */ - export interface Schema$GoogleCloudAiplatformV1beta1CompletionStats { - /** - * Output only. The number of entities for which any error was encountered. - */ - failedCount?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1CreateTensorboardTimeSeriesRequest { /** - * Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected). + * Required. The resource name of the TensorboardRun to create the TensorboardTimeSeries in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` */ - incompleteCount?: string | null; + parent?: string | null; /** - * Output only. The number of entities that had been processed successfully. + * Required. The TensorboardTimeSeries to create. */ - successfulCount?: string | null; + tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries; /** - * Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction. + * Optional. The user specified unique ID to use for the TensorboardTimeSeries, which becomes the final component of the TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, 127\}" */ - successfulForecastPointCount?: string | null; + tensorboardTimeSeriesId?: string | null; } /** - * Request message for ComputeTokens RPC call. + * The storage details for CSV output content. */ - export interface Schema$GoogleCloudAiplatformV1beta1ComputeTokensRequest { + export interface Schema$GoogleCloudAiplatformV1beta1CsvDestination { /** - * Required. 
The instances that are the input to token computing API call. Schema is identical to the prediction schema of the text model, even for the non-text models, like chat models, or Codey models. + * Required. Google Cloud Storage location. */ - instances?: any[] | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; } /** - * Response message for ComputeTokens RPC call. + * The storage details for CSV input content. */ - export interface Schema$GoogleCloudAiplatformV1beta1ComputeTokensResponse { + export interface Schema$GoogleCloudAiplatformV1beta1CsvSource { /** - * Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances. + * Required. Google Cloud Storage location. */ - tokensInfo?: Schema$GoogleCloudAiplatformV1beta1TokensInfo[]; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; } /** - * The Container Registry location for the container image. + * Represents a job that runs custom workloads such as a Docker container or a Python package. A CustomJob can have multiple worker pools and each worker pool can have its own machine and input spec. A CustomJob will be cleaned up once the job enters terminal state (failed or succeeded). */ - export interface Schema$GoogleCloudAiplatformV1beta1ContainerRegistryDestination { + export interface Schema$GoogleCloudAiplatformV1beta1CustomJob { /** - * Required. Container Registry URI of a container image. Only Google Container Registry and Artifact Registry are supported now. Accepted forms: * Google Container Registry path. For example: `gcr.io/projectId/imageName:tag`. * Artifact Registry path. For example: `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. If a tag is not specified, "latest" will be used as the default tag. + * Output only. Time when the CustomJob was created. 
*/ - outputUri?: string | null; - } - /** - * The spec of a Container. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ContainerSpec { + createTime?: string | null; /** - * The arguments to be passed when starting the container. + * Required. The display name of the CustomJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - args?: string[] | null; + displayName?: string | null; /** - * The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided. + * Customer-managed encryption key options for a CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. */ - command?: string[] | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Environment variables to be passed to the container. Maximum limit is 100. + * Output only. Time when the CustomJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ - env?: Schema$GoogleCloudAiplatformV1beta1EnvVar[]; + endTime?: string | null; /** - * Required. The URI of a container image in the Container Registry that is to be run on each worker replica. + * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */ - imageUri?: string | null; - } - /** - * The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Content { + error?: Schema$GoogleRpcStatus; /** - * Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + * Required. Job spec. 
*/ - parts?: Schema$GoogleCloudAiplatformV1beta1Part[]; + jobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; /** - * Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + * The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - role?: string | null; - } - /** - * Instance of a general context. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Context { + labels?: {[key: string]: string} | null; /** - * Output only. Timestamp when this Context was created. + * Output only. Resource name of a CustomJob. */ - createTime?: string | null; + name?: string | null; /** - * Description of the Context + * Output only. Time when the CustomJob for the first time entered the `JOB_STATE_RUNNING` state. */ - description?: string | null; + startTime?: string | null; /** - * User provided display name of the Context. May be up to 128 Unicode characters. + * Output only. The detailed state of the job. */ - displayName?: string | null; + state?: string | null; /** - * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Output only. Time when the CustomJob was most recently updated. */ - etag?: string | null; + updateTime?: string | null; /** - * The labels with user-defined metadata to organize your Contexts. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Context (System labels are excluded). + * Output only. 
URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if job_spec.enable_web_access is `true`. The keys are names of each node in the training job; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. */ - labels?: {[key: string]: string} | null; + webAccessUris?: {[key: string]: string} | null; + } + /** + * Represents the spec of a CustomJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1CustomJobSpec { /** - * Properties of the Context. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. + * The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` */ - metadata?: {[key: string]: any} | null; + baseOutputDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; /** - * Immutable. The resource name of the Context. + * Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. 
If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). */ - name?: string | null; + enableDashboardAccess?: boolean | null; /** - * Output only. A list of resource names of Contexts that are parents of this Context. A Context may have at most 10 parent_contexts. + * Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). */ - parentContexts?: string[] | null; + enableWebAccess?: boolean | null; /** - * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Optional. The Experiment associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}` */ - schemaTitle?: string | null; + experiment?: string | null; /** - * The version of the schema in schema_name to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Optional. The Experiment Run associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}-{experiment-run-name\}` */ - schemaVersion?: string | null; + experimentRun?: string | null; /** - * Output only. Timestamp when this Context was last updated. + * Optional. The name of the Model resources for which to generate a mapping to artifact URIs. 
Applicable only to some of the Google-provided custom jobs. Format: `projects/{project\}/locations/{location\}/models/{model\}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. */ - updateTime?: string | null; - } - /** - * Details of ModelService.CopyModel operation. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CopyModelOperationMetadata { + models?: string[] | null; /** - * The common part of the operation metadata. + * Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for ModelService.CopyModel. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CopyModelRequest { + network?: string | null; /** - * Customer-managed encryption key options. If this is set, then the Model copy will be encrypted with the provided encryption key. + * Optional. 
The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + persistentResourceId?: string | null; /** - * Optional. Copy source_model into a new Model with this ID. The ID will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + * The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations */ - modelId?: string | null; + protectedArtifactLocationId?: string | null; /** - * Optional. Specify this field to copy source_model into this existing Model as a new version. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. */ - parentModel?: string | null; + reservedIpRanges?: string[] | null; /** - * Required. The resource name of the Model to copy. That Model must be in the same Project. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Scheduling options for a CustomJob. */ - sourceModel?: string | null; - } - /** - * Response message of ModelService.CopyModel operation. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1CopyModelResponse { + scheduling?: Schema$GoogleCloudAiplatformV1beta1Scheduling; /** - * The name of the copied Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. */ - model?: string | null; + serviceAccount?: string | null; /** - * Output only. The version ID of the model that is copied. + * Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` */ - modelVersionId?: string | null; + tensorboard?: string | null; + /** + * Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + */ + workerPoolSpecs?: Schema$GoogleCloudAiplatformV1beta1WorkerPoolSpec[]; } /** - * Request message for PredictionService.CountTokens. + * A piece of data in a Dataset. Could be an image, a video, a document or plain text. */ - export interface Schema$GoogleCloudAiplatformV1beta1CountTokensRequest { + export interface Schema$GoogleCloudAiplatformV1beta1DataItem { /** - * Required. Input content. + * Output only. Timestamp when this DataItem was created. */ - contents?: Schema$GoogleCloudAiplatformV1beta1Content[]; + createTime?: string | null; /** - * Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
*/ - instances?: any[] | null; + etag?: string | null; /** - * Required. The name of the publisher model requested to serve the prediction. Format: `projects/{project\}/locations/{location\}/publishers/x/models/x` + * Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one DataItem(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - model?: string | null; - } - /** - * Response message for PredictionService.CountTokens. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CountTokensResponse { + labels?: {[key: string]: string} | null; /** - * The total number of billable characters counted across all instances from the request. + * Output only. The resource name of the DataItem. */ - totalBillableCharacters?: number | null; + name?: string | null; /** - * The total number of tokens counted across all instances from the request. + * Required. The data that the DataItem represents (for example, an image or a text snippet). The schema of the payload is stored in the parent Dataset's metadata schema's dataItemSchemaUri field. */ - totalTokens?: number | null; - } - /** - * Runtime operation information for DatasetService.CreateDataset. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateDatasetOperationMetadata { + payload?: any | null; /** - * The operation generic information. + * Output only. Timestamp when this DataItem was last updated. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + updateTime?: string | null; } /** - * Runtime operation information for DatasetService.CreateDatasetVersion. 
+ * A container for a single DataItem and Annotations on it. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateDatasetVersionOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1DataItemView { /** - * The common part of the operation metadata. + * The Annotations on the DataItem. If too many Annotations should be returned for the DataItem, this field will be truncated per annotations_limit in request. If it was, then the has_truncated_annotations will be set to true. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Runtime operation information for CreateDeploymentResourcePool method. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateDeploymentResourcePoolOperationMetadata { + annotations?: Schema$GoogleCloudAiplatformV1beta1Annotation[]; /** - * The operation generic information. + * The DataItem. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + dataItem?: Schema$GoogleCloudAiplatformV1beta1DataItem; + /** + * True if and only if the Annotations field has been truncated. It happens if more Annotations for this DataItem met the request's annotation_filter than are allowed to be returned by annotations_limit. Note that if Annotations field is not being returned due to field mask, then this field will not be set to true no matter how many Annotations are there. + */ + hasTruncatedAnnotations?: boolean | null; } /** - * Request message for CreateDeploymentResourcePool method. + * DataLabelingJob is used to trigger a human labeling job on unlabeled data from the following Dataset: */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateDeploymentResourcePoolRequest { + export interface Schema$GoogleCloudAiplatformV1beta1DataLabelingJob { /** - * Required. The DeploymentResourcePool to create. + * Parameters that configure the active learning pipeline. Active learning will label the data incrementally via several iterations. 
For every iteration, it will select a batch of data based on the sampling strategy. */ - deploymentResourcePool?: Schema$GoogleCloudAiplatformV1beta1DeploymentResourcePool; + activeLearningConfig?: Schema$GoogleCloudAiplatformV1beta1ActiveLearningConfig; /** - * Required. The ID to use for the DeploymentResourcePool, which will become the final component of the DeploymentResourcePool's resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61\}[a-z0-9])?$/`. + * Labels to assign to annotations generated by this DataLabelingJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - deploymentResourcePoolId?: string | null; - } - /** - * Runtime operation information for EndpointService.CreateEndpoint. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateEndpointOperationMetadata { + annotationLabels?: {[key: string]: string} | null; /** - * The operation generic information. + * Output only. Timestamp when this DataLabelingJob was created. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform create EntityType. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateEntityTypeOperationMetadata { + createTime?: string | null; /** - * Operation metadata for EntityType. + * Output only. Estimated cost(in US dollars) that the DataLabelingJob has incurred to date. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of ExtensionControllerService.CreateExtensionController operation. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateExtensionControllerOperationMetadata { + currentSpend?: Schema$GoogleTypeMoney; /** - * The common part of the operation metadata. + * Required. Dataset resource names. Right now we only support labeling from a single Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform create FeatureGroup. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureGroupOperationMetadata { + datasets?: string[] | null; /** - * Operation metadata for FeatureGroup. + * Required. The user-defined name of the DataLabelingJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a DataLabelingJob. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform create FeatureOnlineStore. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureOnlineStoreOperationMetadata { + displayName?: string | null; /** - * Operation metadata for FeatureOnlineStore. + * Customer-managed encryption key spec for a DataLabelingJob. If set, this DataLabelingJob will be secured by this key. Note: Annotations created in the DataLabelingJob are associated with the EncryptionSpec of the Dataset they are exported to. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform create Feature. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureOperationMetadata { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Operation metadata for Feature. + * Output only. DataLabelingJob errors. It is only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for FeaturestoreService.CreateFeature. Request message for FeatureRegistryService.CreateFeature. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureRequest { + error?: Schema$GoogleRpcStatus; /** - * Required. The Feature to create. + * Required. Input config parameters for the DataLabelingJob. */ - feature?: Schema$GoogleCloudAiplatformV1beta1Feature; + inputs?: any | null; /** - * Required. The ID to use for the Feature, which will become the final component of the Feature's resource name. This value may be up to 128 characters, and valid characters are `[a-z0-9_]`. The first character cannot be a number. The value must be unique within an EntityType/FeatureGroup. + * Required. Points to a YAML file stored on Google Cloud Storage describing the config for a specific type of DataLabelingJob. The schema files that can be used here are found in the https://storage.googleapis.com/google-cloud-aiplatform bucket in the /schema/datalabelingjob/inputs/ folder. */ - featureId?: string | null; + inputsSchemaUri?: string | null; /** - * Required. The resource name of the EntityType or FeatureGroup to create a Feature. Format for entity_type as parent: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` Format for feature_group as parent: `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}` + * Required. The Google Cloud Storage location of the instruction pdf. This pdf is shared with labelers, and provides detailed description on how to label DataItems in Datasets. */ - parent?: string | null; - } - /** - * Details of operations that perform create Featurestore. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateFeaturestoreOperationMetadata { + instructionUri?: string | null; /** - * Operation metadata for Featurestore. + * Required. 
Number of labelers to work on each DataItem. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform create FeatureView. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateFeatureViewOperationMetadata { + labelerCount?: number | null; /** - * Operation metadata for FeatureView Create. + * Output only. Current labeling job progress percentage scaled in interval [0, 100], indicating the percentage of DataItems that has been finished. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Runtime operation information for IndexEndpointService.CreateIndexEndpoint. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateIndexEndpointOperationMetadata { + labelingProgress?: number | null; /** - * The operation generic information. + * The labels with user-defined metadata to organize your DataLabelingJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each DataLabelingJob: * "aiplatform.googleapis.com/schema": output only, its value is the inputs_schema's title. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Runtime operation information for IndexService.CreateIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateIndexOperationMetadata { + labels?: {[key: string]: string} | null; /** - * The operation generic information. + * Output only. Resource name of the DataLabelingJob. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + name?: string | null; /** - * The operation metadata with regard to Matching Engine Index operation. + * The SpecialistPools' resource names associated with this job. */ - nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata; - } - /** - * Details of operations that perform MetadataService.CreateMetadataStore. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateMetadataStoreOperationMetadata { + specialistPools?: string[] | null; /** - * Operation metadata for creating a MetadataStore. + * Output only. The detailed state of the job. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + state?: string | null; + /** + * Output only. Timestamp when this DataLabelingJob was updated most recently. + */ + updateTime?: string | null; } /** - * Request message for ModelMonitoringService.CreateModelMonitoringJob. + * A collection of DataItems and Annotations on them. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateModelMonitoringJobRequest { + export interface Schema$GoogleCloudAiplatformV1beta1Dataset { /** - * Required. The ModelMonitoringJob to create + * Output only. Timestamp when this Dataset was created. */ - modelMonitoringJob?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJob; + createTime?: string | null; /** - * Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61\}[a-z0-9])?$/`. + * Output only. The number of DataItems in this Dataset. Only apply for non-structured Dataset. */ - modelMonitoringJobId?: string | null; + dataItemCount?: string | null; /** - * Required. The parent of the ModelMonitoringJob. 
Format: `projects/{project\}/locations/{location\}/modelMoniitors/{model_monitor\}` + * The description of the Dataset. */ - parent?: string | null; - } - /** - * Runtime operation information for ModelMonitoringService.CreateModelMonitor. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateModelMonitorOperationMetadata { + description?: string | null; /** - * The operation generic information. + * Required. The user-defined name of the Dataset. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for [NotebookService.CreateNotebookExecutionJob] - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest { + displayName?: string | null; /** - * Required. The NotebookExecutionJob to create. + * Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. */ - notebookExecutionJob?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJob; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Optional. User specified ID for the NotebookExecutionJob. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - notebookExecutionJobId?: string | null; + etag?: string | null; /** - * Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project\}/locations/{location\}` + * The labels with user-defined metadata to organize your Datasets. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. 
System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value is the metadata_schema's title. */ - parent?: string | null; - } - /** - * Metadata information for NotebookService.CreateNotebookRuntimeTemplate. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateNotebookRuntimeTemplateOperationMetadata { + labels?: {[key: string]: string} | null; /** - * The operation generic information. + * Required. Additional information about the Dataset. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform create PersistentResource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1CreatePersistentResourceOperationMetadata { + metadata?: any | null; /** - * Operation metadata for PersistentResource. + * Output only. The resource name of the Artifact that was created in MetadataStore when creating the Dataset. The Artifact resource name pattern is `projects/{project\}/locations/{location\}/metadataStores/{metadata_store\}/artifacts/{artifact\}`. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + metadataArtifact?: string | null; /** - * Progress Message for Create LRO + * Required. Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. */ - progressMessage?: string | null; + metadataSchemaUri?: string | null; + /** + * Optional. Reference to the public base model last used by the dataset. Only set for prompt datasets. + */ + modelReference?: string | null; + /** + * Output only. The resource name of the Dataset. 
+ */ + name?: string | null; + /** + * All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will not be populated except for UI cases which will only use annotation_spec_count. In CreateDataset request, a SavedQuery is created together if this field is set, up to one SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec. + */ + savedQueries?: Schema$GoogleCloudAiplatformV1beta1SavedQuery[]; + /** + * Output only. Timestamp when this Dataset was last updated. + */ + updateTime?: string | null; } /** - * Request message for PipelineService.CreatePipelineJob. + * Describes the dataset version. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreatePipelineJobRequest { + export interface Schema$GoogleCloudAiplatformV1beta1DatasetVersion { /** - * Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project\}/locations/{location\}` + * Output only. Name of the associated BigQuery dataset. */ - parent?: string | null; + bigQueryDatasetName?: string | null; /** - * Required. The PipelineJob to create. + * Output only. Timestamp when this DatasetVersion was created. */ - pipelineJob?: Schema$GoogleCloudAiplatformV1beta1PipelineJob; + createTime?: string | null; /** - * The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are `/a-z-/`. + * The user-defined name of the DatasetVersion. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - pipelineJobId?: string | null; + displayName?: string | null; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + etag?: string | null; + /** + * Required. Output only. Additional information about the DatasetVersion. 
+ */ + metadata?: any | null; + /** + * Output only. Reference to the public base model last used by the dataset version. Only set for prompt dataset versions. + */ + modelReference?: string | null; + /** + * Output only. The resource name of the DatasetVersion. + */ + name?: string | null; + /** + * Output only. Timestamp when this DatasetVersion was last updated. + */ + updateTime?: string | null; } /** - * Details of operations that perform create FeatureGroup. + * A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateRegistryFeatureOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1DedicatedResources { /** - * Operation metadata for Feature. + * Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + autoscalingMetricSpecs?: Schema$GoogleCloudAiplatformV1beta1AutoscalingMetricSpec[]; + /** + * Required. Immutable. 
The specification of a single machine used by the prediction. + */ + machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + /** + * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + */ + maxReplicaCount?: number | null; + /** + * Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. + */ + minReplicaCount?: number | null; } /** - * Runtime operation information for SolverService.CreateSolver. + * Details of operations that delete Feature values. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateSolverOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesOperationMetadata { /** - * The generic operation information. + * Operation metadata for Featurestore delete Features values. */ genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Runtime operation information for SpecialistPoolService.CreateSpecialistPool. + * Request message for FeaturestoreService.DeleteFeatureValues. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1CreateSpecialistPoolOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequest { /** - * The operation generic information. + * Select feature values to be deleted by specifying entities. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + selectEntity?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectEntity; + /** + * Select feature values to be deleted by specifying time range and features. + */ + selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectTimeRangeAndFeature; } /** - * Details of operations that perform create Tensorboard. + * Message to select entity. If an entity id is selected, all the feature values corresponding to the entity id will be deleted, including the entityId. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateTensorboardOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectEntity { /** - * Operation metadata for Tensorboard. + * Required. Selectors choosing feature values of which entity id to be deleted from the EntityType. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + entityIdSelector?: Schema$GoogleCloudAiplatformV1beta1EntityIdSelector; } /** - * Request message for TensorboardService.CreateTensorboardRun. + * Message to select time range and feature. Values of the selected feature generated within an inclusive time range will be deleted. Using this option permanently deletes the feature values from the specified feature IDs within the specified time range. This might include data from the online storage. If you want to retain any deleted historical data in the online storage, you must re-ingest it. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1CreateTensorboardRunRequest { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { /** - * Required. The resource name of the TensorboardExperiment to create the TensorboardRun in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` + * Required. Selectors choosing which feature values to be deleted from the EntityType. */ - parent?: string | null; + featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; /** - * Required. The TensorboardRun to create. + * If set, data will not be deleted from online storage. When time range is older than the data in online storage, setting this to be true will make the deletion have no impact on online serving. */ - tensorboardRun?: Schema$GoogleCloudAiplatformV1beta1TensorboardRun; + skipOnlineStorageDelete?: boolean | null; /** - * Required. The ID to use for the Tensorboard run, which becomes the final component of the Tensorboard run's resource name. This value should be 1-128 characters, and valid characters are `/a-z-/`. + * Required. Select feature generated within a half-inclusive time range. The time range is lower inclusive and upper exclusive. */ - tensorboardRunId?: string | null; + timeRange?: Schema$GoogleTypeInterval; } /** - * Request message for TensorboardService.CreateTensorboardTimeSeries. + * Response message for FeaturestoreService.DeleteFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1beta1CreateTensorboardTimeSeriesRequest { - /** - * Required. The resource name of the TensorboardRun to create the TensorboardTimeSeries in. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` - */ - parent?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponse { /** - * Required. The TensorboardTimeSeries to create. 
+ * Response for request specifying the entities to delete */ - tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries; + selectEntity?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectEntity; /** - * Optional. The user specified unique ID to use for the TensorboardTimeSeries, which becomes the final component of the TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, 127\}" + * Response for request specifying time range and feature */ - tensorboardTimeSeriesId?: string | null; + selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectTimeRangeAndFeature; } /** - * The storage details for CSV output content. + * Response message if the request uses the SelectEntity option. */ - export interface Schema$GoogleCloudAiplatformV1beta1CsvDestination { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectEntity { /** - * Required. Google Cloud Storage location. + * The count of deleted entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + offlineStorageDeletedEntityRowCount?: string | null; + /** + * The count of deleted entities in the online storage. Each entity ID corresponds to one entity. + */ + onlineStorageDeletedEntityCount?: string | null; } /** - * The storage details for CSV input content. + * Response message if the request uses the SelectTimeRangeAndFeature option. */ - export interface Schema$GoogleCloudAiplatformV1beta1CsvSource { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { /** - * Required. Google Cloud Storage location. + * The count of the features or columns impacted. This is the same as the feature count in the request. 
*/ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + impactedFeatureCount?: string | null; + /** + * The count of modified entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. Within each row, only the features specified in the request are deleted. + */ + offlineStorageModifiedEntityRowCount?: string | null; + /** + * The count of modified entities in the online storage. Each entity ID corresponds to one entity. Within each entity, only the features specified in the request are deleted. + */ + onlineStorageModifiedEntityCount?: string | null; } /** - * Represents a job that runs custom workloads such as a Docker container or a Python package. A CustomJob can have multiple worker pools and each worker pool can have its own machine and input spec. A CustomJob will be cleaned up once the job enters terminal state (failed or succeeded). + * Details of operations that perform MetadataService.DeleteMetadataStore. */ - export interface Schema$GoogleCloudAiplatformV1beta1CustomJob { + export interface Schema$GoogleCloudAiplatformV1beta1DeleteMetadataStoreOperationMetadata { /** - * Output only. Time when the CustomJob was created. + * Operation metadata for deleting a MetadataStore. */ - createTime?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Details of operations that perform deletes of any entities. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeleteOperationMetadata { /** - * Required. The display name of the CustomJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The common part of the operation metadata. */ - displayName?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * A deployment of an Index. IndexEndpoints contain one or more DeployedIndexes. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndex { /** - * Customer-managed encryption key options for a CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key. + * Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2 (we don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + automaticResources?: Schema$GoogleCloudAiplatformV1beta1AutomaticResources; /** - * Output only. Time when the CustomJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + * Output only. Timestamp when the DeployedIndex was created. */ - endTime?: string | null; + createTime?: string | null; /** - * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * Optional. A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. The field min_replica_count must be set to a value strictly greater than 0, or else validation will fail. We don't provide SLA when min_replica_count=1. If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency. 
*/ - error?: Schema$GoogleRpcStatus; + dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; /** - * Required. Job spec. + * Optional. If set, the authentication is enabled for the private endpoint. */ - jobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; + deployedIndexAuthConfig?: Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfig; /** - * The labels with user-defined metadata to organize CustomJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Optional. The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. Creating `deployment_groups` with `reserved_ip_ranges` is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. Note: we only support up to 5 deployment groups(not including 'default'). */ - labels?: {[key: string]: string} | null; + deploymentGroup?: string | null; /** - * Output only. Resource name of a CustomJob. + * The display name of the DeployedIndex. If not provided upon creation, the Index's display_name is used. */ - name?: string | null; + displayName?: string | null; /** - * Output only. Time when the CustomJob for the first time entered the `JOB_STATE_RUNNING` state. + * Optional. If true, private endpoint's access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each MatchRequest. 
Note that logs may incur a cost, especially if the deployed index receives a high queries per second rate (QPS). Estimate your costs before enabling this option. */ - startTime?: string | null; + enableAccessLogging?: boolean | null; /** - * Output only. The detailed state of the job. + * Required. The user specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. */ - state?: string | null; + id?: string | null; /** - * Output only. Time when the CustomJob was most recently updated. + * Required. The name of the Index this is the deployment of. We may refer to this Index as the DeployedIndex's "original" Index. */ - updateTime?: string | null; + index?: string | null; /** - * Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if job_spec.enable_web_access is `true`. The keys are names of each node in the training job; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + * Output only. The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the Index.update_time of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must list the operations that are running on the original Index. 
Only the successfully completed Operations with update_time equal or before this sync time are contained in this DeployedIndex. */ - webAccessUris?: {[key: string]: string} | null; + indexSyncTime?: string | null; + /** + * Output only. Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if network is configured. + */ + privateEndpoints?: Schema$GoogleCloudAiplatformV1beta1IndexPrivateEndpoints; + /** + * Optional. A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. + */ + reservedIpRanges?: string[] | null; } /** - * Represents the spec of a CustomJob. + * Used to set up the auth on the DeployedIndex's private endpoint. */ - export interface Schema$GoogleCloudAiplatformV1beta1CustomJobSpec { + export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfig { /** - * The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. 
The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` + * Defines the authentication provider that the DeployedIndex uses. */ - baseOutputDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + authProvider?: Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfigAuthProvider; + } + /** + * Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfigAuthProvider { /** - * Optional. Whether you want Vertex AI to enable access to the customized dashboard in training chief container. If set to `true`, you can access the dashboard at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + * A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: `service-account-name@project-id.iam.gserviceaccount.com` */ - enableDashboardAccess?: boolean | null; + allowedIssuers?: string[] | null; /** - * Optional. Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If set to `true`, you can access interactive shells at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris (within HyperparameterTuningJob.trials). + * The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. 
*/ - enableWebAccess?: boolean | null; + audiences?: string[] | null; + } + /** + * Points to a DeployedIndex. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndexRef { /** - * Optional. The Experiment associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}` + * Immutable. The ID of the DeployedIndex in the above IndexEndpoint. */ - experiment?: string | null; + deployedIndexId?: string | null; /** - * Optional. The Experiment Run associated with this job. Format: `projects/{project\}/locations/{location\}/metadataStores/{metadataStores\}/contexts/{experiment-name\}-{experiment-run-name\}` + * Output only. The display name of the DeployedIndex. */ - experimentRun?: string | null; + displayName?: string | null; /** - * Optional. The name of the Model resources for which to generate a mapping to artifact URIs. Applicable only to some of the Google-provided custom jobs. Format: `projects/{project\}/locations/{location\}/models/{model\}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. + * Immutable. A resource name of the IndexEndpoint. */ - models?: string[] | null; + indexEndpoint?: string | null; + } + /** + * A deployment of a Model. Endpoints contain one or more DeployedModels. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployedModel { /** - * Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. 
For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. + * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. */ - network?: string | null; + automaticResources?: Schema$GoogleCloudAiplatformV1beta1AutomaticResources; /** - * Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. + * Output only. Timestamp when the DeployedModel was created. */ - persistentResourceId?: string | null; + createTime?: string | null; /** - * The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. */ - protectedArtifactLocationId?: string | null; + dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; /** - * Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. 
Example: ['vertex-ai-ip-range']. + * If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. */ - reservedIpRanges?: string[] | null; + disableExplanations?: boolean | null; /** - * Scheduling options for a CustomJob. + * The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. */ - scheduling?: Schema$GoogleCloudAiplatformV1beta1Scheduling; + displayName?: string | null; /** - * Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used. + * If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. */ - serviceAccount?: string | null; + enableAccessLogging?: boolean | null; /** - * Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob will upload Tensorboard logs. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` + * If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging. Only supported for custom-trained Models and AutoML Tabular Models. */ - tensorboard?: string | null; + enableContainerLogging?: boolean | null; /** - * Required. The spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. + * Explanation configuration for this DeployedModel. 
When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. */ - workerPoolSpecs?: Schema$GoogleCloudAiplatformV1beta1WorkerPoolSpec[]; - } - /** - * A piece of data in a Dataset. Could be an image, a video, a document or plain text. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DataItem { + explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; /** - * Output only. Timestamp when this DataItem was created. + * Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are `/[0-9]/`. */ - createTime?: string | null; + id?: string | null; /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. */ - etag?: string | null; + model?: string | null; /** - * Optional. The labels with user-defined metadata to organize your DataItems. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. 
No more than 64 user labels can be associated with one DataItem(System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Output only. The version ID of the model that is deployed. */ - labels?: {[key: string]: string} | null; + modelVersionId?: string | null; /** - * Output only. The resource name of the DataItem. + * Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured. */ - name?: string | null; + privateEndpoints?: Schema$GoogleCloudAiplatformV1beta1PrivateEndpoints; /** - * Required. The data that the DataItem represents (for example, an image or a text snippet). The schema of the payload is stored in the parent Dataset's metadata schema's dataItemSchemaUri field. + * The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. */ - payload?: any | null; + serviceAccount?: string | null; /** - * Output only. Timestamp when this DataItem was last updated. + * The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` */ - updateTime?: string | null; + sharedResources?: string | null; } /** - * A container for a single DataItem and Annotations on it. + * Points to a DeployedModel. */ - export interface Schema$GoogleCloudAiplatformV1beta1DataItemView { - /** - * The Annotations on the DataItem. 
If too many Annotations should be returned for the DataItem, this field will be truncated per annotations_limit in request. If it was, then the has_truncated_annotations will be set to true. - */ - annotations?: Schema$GoogleCloudAiplatformV1beta1Annotation[]; + export interface Schema$GoogleCloudAiplatformV1beta1DeployedModelRef { /** - * The DataItem. + * Immutable. An ID of a DeployedModel in the above Endpoint. */ - dataItem?: Schema$GoogleCloudAiplatformV1beta1DataItem; + deployedModelId?: string | null; /** - * True if and only if the Annotations field has been truncated. It happens if more Annotations for this DataItem met the request's annotation_filter than are allowed to be returned by annotations_limit. Note that if Annotations field is not being returned due to field mask, then this field will not be set to true no matter how many Annotations are there. + * Immutable. A resource name of an Endpoint. */ - hasTruncatedAnnotations?: boolean | null; + endpoint?: string | null; } /** - * DataLabelingJob is used to trigger a human labeling job on unlabeled data from the following Dataset: + * Runtime operation information for IndexEndpointService.DeployIndex. */ - export interface Schema$GoogleCloudAiplatformV1beta1DataLabelingJob { + export interface Schema$GoogleCloudAiplatformV1beta1DeployIndexOperationMetadata { /** - * Parameters that configure the active learning pipeline. Active learning will label the data incrementally via several iterations. For every iteration, it will select a batch of data based on the sampling strategy. + * The unique index id specified by user */ - activeLearningConfig?: Schema$GoogleCloudAiplatformV1beta1ActiveLearningConfig; + deployedIndexId?: string | null; /** - * Labels to assign to annotations generated by this DataLabelingJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. 
International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * The operation generic information. */ - annotationLabels?: {[key: string]: string} | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Request message for IndexEndpointService.DeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployIndexRequest { /** - * Output only. Timestamp when this DataLabelingJob was created. + * Required. The DeployedIndex to be created within the IndexEndpoint. */ - createTime?: string | null; + deployedIndex?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex; + } + /** + * Response message for IndexEndpointService.DeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployIndexResponse { /** - * Output only. Estimated cost(in US dollars) that the DataLabelingJob has incurred to date. + * The DeployedIndex that had been deployed in the IndexEndpoint. */ - currentSpend?: Schema$GoogleTypeMoney; + deployedIndex?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex; + } + /** + * A description of resources that can be shared by multiple DeployedModels, whose underlying specification consists of a DedicatedResources. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeploymentResourcePool { /** - * Required. Dataset resource names. Right now we only support labeling from a single Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}` + * Output only. Timestamp when this DeploymentResourcePool was created. */ - datasets?: string[] | null; + createTime?: string | null; /** - * Required. The user-defined name of the DataLabelingJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a DataLabelingJob. + * Required. 
The underlying DedicatedResources that the DeploymentResourcePool uses. */ - displayName?: string | null; + dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; /** - * Customer-managed encryption key spec for a DataLabelingJob. If set, this DataLabelingJob will be secured by this key. Note: Annotations created in the DataLabelingJob are associated with the EncryptionSpec of the Dataset they are exported to. + * Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + name?: string | null; + } + /** + * Runtime operation information for EndpointService.DeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployModelOperationMetadata { /** - * Output only. DataLabelingJob errors. It is only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * The operation generic information. */ - error?: Schema$GoogleRpcStatus; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Request message for EndpointService.DeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployModelRequest { /** - * Required. Input config parameters for the DataLabelingJob. + * Required. The DeployedModel to be created within the Endpoint. Note that Endpoint.traffic_split must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via EndpointService.UpdateEndpoint. */ - inputs?: any | null; + deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; /** - * Required. Points to a YAML file stored on Google Cloud Storage describing the config for a specific type of DataLabelingJob. The schema files that can be used here are found in the https://storage.googleapis.com/google-cloud-aiplatform bucket in the /schema/datalabelingjob/inputs/ folder. 
+ * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's traffic_split will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's traffic_split is not updated. */ - inputsSchemaUri?: string | null; + trafficSplit?: {[key: string]: number} | null; + } + /** + * Response message for EndpointService.DeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeployModelResponse { /** - * Required. The Google Cloud Storage location of the instruction pdf. This pdf is shared with labelers, and provides detailed description on how to label DataItems in Datasets. + * The DeployedModel that had been deployed in the Endpoint. */ - instructionUri?: string | null; + deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; + } + /** + * Runtime operation information for SolverService.DeploySolver. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DeploySolverOperationMetadata { /** - * Required. Number of labelers to work on each DataItem. + * The generic operation information. */ - labelerCount?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + export interface Schema$GoogleCloudAiplatformV1beta1DestinationFeatureSetting { /** - * Output only. Current labeling job progress percentage scaled in interval [0, 100], indicating the percentage of DataItems that has been finished. + * Specify the field name in the export destination. If not specified, Feature ID is used. */ - labelingProgress?: number | null; + destinationField?: string | null; /** - * The labels with user-defined metadata to organize your DataLabelingJobs. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each DataLabelingJob: * "aiplatform.googleapis.com/schema": output only, its value is the inputs_schema's title. + * Required. The ID of the Feature to apply the setting to. */ - labels?: {[key: string]: string} | null; + featureId?: string | null; + } + /** + * Request message for PredictionService.DirectPredict. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DirectPredictRequest { /** - * Output only. Resource name of the DataLabelingJob. + * The prediction input. */ - name?: string | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; /** - * The SpecialistPools' resource names associated with this job. + * The parameters that govern the prediction. */ - specialistPools?: string[] | null; + parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; + } + /** + * Response message for PredictionService.DirectPredict. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DirectPredictResponse { /** - * Output only. The detailed state of the job. + * The prediction output. */ - state?: string | null; + outputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; /** - * Output only. Timestamp when this DataLabelingJob was updated most recently. + * The parameters that govern the prediction. */ - updateTime?: string | null; + parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; } /** - * A collection of DataItems and Annotations on them. + * Request message for PredictionService.DirectRawPredict. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1Dataset { + export interface Schema$GoogleCloudAiplatformV1beta1DirectRawPredictRequest { /** - * Output only. Timestamp when this Dataset was created. + * The prediction input. + */ + input?: string | null; + /** + * Fully qualified name of the API method being invoked to perform predictions. Format: `/namespace.Service/Method/` Example: `/tensorflow.serving.PredictionService/Predict` + */ + methodName?: string | null; + } + /** + * Response message for PredictionService.DirectRawPredict. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DirectRawPredictResponse { + /** + * The prediction output. + */ + output?: string | null; + } + /** + * The input content is encapsulated and uploaded in the request. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DirectUploadSource {} + /** + * Represents the spec of disk options. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DiskSpec { + /** + * Size in GB of the boot disk (default is 100GB). + */ + bootDiskSizeGb?: number | null; + /** + * Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + */ + bootDiskType?: string | null; + } + /** + * A list of double values. + */ + export interface Schema$GoogleCloudAiplatformV1beta1DoubleArray { + /** + * A list of double values. + */ + values?: number[] | null; + } + /** + * Represents a customer-managed encryption key spec that can be applied to a top-level resource. + */ + export interface Schema$GoogleCloudAiplatformV1beta1EncryptionSpec { + /** + * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. 
+ */ + kmsKeyName?: string | null; + } + /** + * Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Endpoint { + /** + * Output only. Timestamp when this Endpoint was created. */ createTime?: string | null; /** - * Output only. The number of DataItems in this Dataset. Only apply for non-structured Dataset. + * Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. */ - dataItemCount?: string | null; + deployedModels?: Schema$GoogleCloudAiplatformV1beta1DeployedModel[]; /** - * The description of the Dataset. + * The description of the Endpoint. */ description?: string | null; /** - * Required. The user-defined name of the Dataset. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string | null; /** - * Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + * Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + */ + enablePrivateServiceConnect?: boolean | null; + /** + * Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. */ encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** @@ -2761,9772 +2829,9587 @@ export namespace aiplatform_v1beta1 { */ etag?: string | null; /** - * The labels with user-defined metadata to organize your Datasets. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value is the metadata_schema's title. + * The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ labels?: {[key: string]: string} | null; /** - * Required. Additional information about the Dataset. + * Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project\}/locations/{location\}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job\}` */ - metadata?: any | null; + modelDeploymentMonitoringJob?: string | null; /** - * Output only. The resource name of the Artifact that was created in MetadataStore when creating the Dataset. The Artifact resource name pattern is `projects/{project\}/locations/{location\}/metadataStores/{metadata_store\}/artifacts/{artifact\}`. + * Output only. The resource name of the Endpoint. */ - metadataArtifact?: string | null; + name?: string | null; /** - * Required. Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. 
The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/. + * Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project\}/global/networks/{network\}`. Where `{project\}` is a project number, as in `12345`, and `{network\}` is network name. */ - metadataSchemaUri?: string | null; + network?: string | null; /** - * Optional. Reference to the public base model last used by the dataset. Only set for prompt datasets. + * Configures the request-response logging for online prediction. */ - modelReference?: string | null; + predictRequestResponseLoggingConfig?: Schema$GoogleCloudAiplatformV1beta1PredictRequestResponseLoggingConfig; /** - * Output only. The resource name of the Dataset. + * Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive. */ - name?: string | null; + privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig; /** - * All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will not be populated except for UI cases which will only use annotation_spec_count. In CreateDataset request, a SavedQuery is created together if this field is set, up to one SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec. + * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. 
If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. */ - savedQueries?: Schema$GoogleCloudAiplatformV1beta1SavedQuery[]; + trafficSplit?: {[key: string]: number} | null; /** - * Output only. Timestamp when this Dataset was last updated. + * Output only. Timestamp when this Endpoint was last updated. */ updateTime?: string | null; } /** - * Describes the dataset version. + * Selector for entityId. Getting ids from the given source. */ - export interface Schema$GoogleCloudAiplatformV1beta1DatasetVersion { + export interface Schema$GoogleCloudAiplatformV1beta1EntityIdSelector { /** - * Output only. Name of the associated BigQuery dataset. + * Source of Csv */ - bigQueryDatasetName?: string | null; + csvSource?: Schema$GoogleCloudAiplatformV1beta1CsvSource; /** - * Output only. Timestamp when this DatasetVersion was created. + * Source column that holds entity IDs. If not provided, entity IDs are extracted from the column named entity_id. + */ + entityIdField?: string | null; + } + /** + * An entity type is a type of object in a system that needs to be modeled and have stored information about. For example, driver is an entity type, and driver0 is an instance of an entity type driver. + */ + export interface Schema$GoogleCloudAiplatformV1beta1EntityType { + /** + * Output only. Timestamp when this EntityType was created. */ createTime?: string | null; /** - * The user-defined name of the DatasetVersion. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Optional. Description of the EntityType. */ - displayName?: string | null; + description?: string | null; /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Optional. Used to perform a consistent read-modify-write updates. 
If not set, a blind "overwrite" update happens. */ etag?: string | null; /** - * Required. Output only. Additional information about the DatasetVersion. + * Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one EntityType (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - metadata?: any | null; + labels?: {[key: string]: string} | null; /** - * Output only. Reference to the public base model last used by the dataset version. Only set for prompt dataset versions. + * Optional. The default monitoring configuration for all Features with value type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 under this EntityType. If this is populated with [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is disabled. */ - modelReference?: string | null; + monitoringConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfig; /** - * Output only. The resource name of the DatasetVersion. + * Immutable. Name of the EntityType. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. */ name?: string | null; /** - * Output only. Timestamp when this DatasetVersion was last updated. + * Optional. 
Config for data retention policy in offline storage. TTL in days for feature values that will be stored in offline storage. The Feature Store offline storage periodically removes obsolete feature values older than `offline_storage_ttl_days` since the feature generation time. If unset (or explicitly set to 0), default to 4000 days TTL. + */ + offlineStorageTtlDays?: number | null; + /** + * Output only. Timestamp when this EntityType was most recently updated. */ updateTime?: string | null; } /** - * A description of resources that are dedicated to a DeployedModel, and that need a higher degree of manual configuration. + * Represents an environment variable present in a Container or Python Module. */ - export interface Schema$GoogleCloudAiplatformV1beta1DedicatedResources { - /** - * Immutable. The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. - */ - autoscalingMetricSpecs?: Schema$GoogleCloudAiplatformV1beta1AutoscalingMetricSpec[]; - /** - * Required. Immutable. The specification of a single machine used by the prediction. 
- */ - machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + export interface Schema$GoogleCloudAiplatformV1beta1EnvVar { /** - * Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + * Required. Name of the environment variable. Must be a valid C identifier. */ - maxReplicaCount?: number | null; + name?: string | null; /** - * Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. + * Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. */ - minReplicaCount?: number | null; + value?: string | null; } /** - * Details of operations that delete Feature values. + * Model error analysis for each annotation. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotation { /** - * Operation metadata for Featurestore delete Features values. + * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for FeaturestoreService.DeleteFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequest { + attributedItems?: Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotationAttributedItem[]; /** - * Select feature values to be deleted by specifying entities. + * The outlier score of this annotated item. Usually defined as the min of all distances from attributed items. */ - selectEntity?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectEntity; + outlierScore?: number | null; /** - * Select feature values to be deleted by specifying time range and features. + * The threshold used to determine if this annotation is an outlier or not. */ - selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectTimeRangeAndFeature; - } - /** - * Message to select entity. If an entity id is selected, all the feature values corresponding to the entity id will be deleted, including the entityId. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectEntity { + outlierThreshold?: number | null; /** - * Required. Selectors choosing feature values of which entity id to be deleted from the EntityType. + * The query type used for finding the attributed items. */ - entityIdSelector?: Schema$GoogleCloudAiplatformV1beta1EntityIdSelector; + queryType?: string | null; } /** - * Message to select time range and feature. 
Values of the selected feature generated within an inclusive time range will be deleted. Using this option permanently deletes the feature values from the specified feature IDs within the specified time range. This might include data from the online storage. If you want to retain any deleted historical data in the online storage, you must re-ingest it. + * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { - /** - * Required. Selectors choosing which feature values to be deleted from the EntityType. - */ - featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; + export interface Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotationAttributedItem { /** - * If set, data will not be deleted from online storage. When time range is older than the data in online storage, setting this to be true will make the deletion have no impact on online serving. + * The unique ID for each annotation. Used by FE to allocate the annotation in DB. */ - skipOnlineStorageDelete?: boolean | null; + annotationResourceName?: string | null; /** - * Required. Select feature generated within a half-inclusive time range. The time range is lower inclusive and upper exclusive. + * The distance of this item to the annotation. */ - timeRange?: Schema$GoogleTypeInterval; + distance?: number | null; } /** - * Response message for FeaturestoreService.DeleteFeatureValues. + * True positive, false positive, or false negative. EvaluatedAnnotation is only available under ModelEvaluationSlice with slice of `annotationSpec` dimension. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotation { /** - * Response for request specifying the entities to delete + * Output only. 
The data item payload that the Model predicted this EvaluatedAnnotation on. */ - selectEntity?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectEntity; + dataItemPayload?: any | null; /** - * Response for request specifying time range and feature + * Annotations of model error analysis results. */ - selectTimeRangeAndFeature?: Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectTimeRangeAndFeature; - } - /** - * Response message if the request uses the SelectEntity option. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectEntity { + errorAnalysisAnnotations?: Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotation[]; /** - * The count of deleted entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. + * Output only. ID of the EvaluatedDataItemView under the same ancestor ModelEvaluation. The EvaluatedDataItemView consists of all ground truths and predictions on data_item_payload. */ - offlineStorageDeletedEntityRowCount?: string | null; + evaluatedDataItemViewId?: string | null; /** - * The count of deleted entities in the online storage. Each entity ID corresponds to one entity. + * Explanations of predictions. Each element of the explanations indicates the explanation for one explanation Method. The attributions list in the EvaluatedAnnotationExplanation.explanation object corresponds to the predictions list. For example, the second element in the attributions list explains the second element in the predictions list. */ - onlineStorageDeletedEntityCount?: string | null; - } - /** - * Response message if the request uses the SelectTimeRangeAndFeature option. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { + explanations?: Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotationExplanation[]; /** - * The count of the features or columns impacted. This is the same as the feature count in the request. + * Output only. The ground truth Annotations, i.e. the Annotations that exist in the test data the Model is evaluated on. For true positive, there is one and only one ground truth annotation, which matches the only prediction in predictions. For false positive, there are zero or more ground truth annotations that are similar to the only prediction in predictions, but not enough for a match. For false negative, there is one and only one ground truth annotation, which doesn't match any predictions created by the model. The schema of the ground truth is stored in ModelEvaluation.annotation_schema_uri */ - impactedFeatureCount?: string | null; + groundTruths?: any[] | null; /** - * The count of modified entity rows in the offline storage. Each row corresponds to the combination of an entity ID and a timestamp. One entity ID can have multiple rows in the offline storage. Within each row, only the features specified in the request are deleted. + * Output only. The model predicted annotations. For true positive, there is one and only one prediction, which matches the only one ground truth annotation in ground_truths. For false positive, there is one and only one prediction, which doesn't match any ground truth annotation of the corresponding data_item_view_id. For false negative, there are zero or more predictions which are similar to the only ground truth annotation in ground_truths but not enough for a match. The schema of the prediction is stored in ModelEvaluation.annotation_schema_uri */ - offlineStorageModifiedEntityRowCount?: string | null; + predictions?: any[] | null; /** - * The count of modified entities in the online storage. 
Each entity ID corresponds to one entity. Within each entity, only the features specified in the request are deleted. + * Output only. Type of the EvaluatedAnnotation. */ - onlineStorageModifiedEntityCount?: string | null; + type?: string | null; } /** - * Details of operations that perform MetadataService.DeleteMetadataStore. + * Explanation result of the prediction produced by the Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteMetadataStoreOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotationExplanation { /** - * Operation metadata for deleting a MetadataStore. + * Explanation attribution response details. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform deletes of any entities. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeleteOperationMetadata { + explanation?: Schema$GoogleCloudAiplatformV1beta1Explanation; /** - * The common part of the operation metadata. + * Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + explanationType?: string | null; } /** - * A deployment of an Index. IndexEndpoints contain one or more DeployedIndexes. + * Request message for EvaluationService.EvaluateInstances. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndex { + export interface Schema$GoogleCloudAiplatformV1beta1EvaluateInstancesRequest { /** - * Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 2 (we don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. 
+ * Instances and metric spec for bleu metric. */ - automaticResources?: Schema$GoogleCloudAiplatformV1beta1AutomaticResources; + bleuInput?: Schema$GoogleCloudAiplatformV1beta1BleuInput; /** - * Output only. Timestamp when the DeployedIndex was created. + * Input for coherence metric. */ - createTime?: string | null; + coherenceInput?: Schema$GoogleCloudAiplatformV1beta1CoherenceInput; /** - * Optional. A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. The field min_replica_count must be set to a value strictly greater than 0, or else validation will fail. We don't provide SLA when min_replica_count=1. If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency. + * Auto metric instances. Instances and metric spec for exact match metric. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; + exactMatchInput?: Schema$GoogleCloudAiplatformV1beta1ExactMatchInput; /** - * Optional. If set, the authentication is enabled for the private endpoint. + * LLM-based metric instance. General text generation metrics, applicable to other categories. Input for fluency metric. */ - deployedIndexAuthConfig?: Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfig; + fluencyInput?: Schema$GoogleCloudAiplatformV1beta1FluencyInput; /** - * Optional. The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. 
Creating `deployment_groups` with `reserved_ip_ranges` is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. Note: we only support up to 5 deployment groups(not including 'default'). + * Input for fulfillment metric. */ - deploymentGroup?: string | null; + fulfillmentInput?: Schema$GoogleCloudAiplatformV1beta1FulfillmentInput; /** - * The display name of the DeployedIndex. If not provided upon creation, the Index's display_name is used. + * Input for groundedness metric. */ - displayName?: string | null; + groundednessInput?: Schema$GoogleCloudAiplatformV1beta1GroundednessInput; /** - * Optional. If true, private endpoint's access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each MatchRequest. Note that logs may incur a cost, especially if the deployed index receives a high queries per second rate (QPS). Estimate your costs before enabling this option. + * Input for pairwise question answering quality metric. */ - enableAccessLogging?: boolean | null; + pairwiseQuestionAnsweringQualityInput?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput; /** - * Required. The user specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. + * Input for pairwise summarization quality metric. */ - id?: string | null; + pairwiseSummarizationQualityInput?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput; /** - * Required. 
The name of the Index this is the deployment of. We may refer to this Index as the DeployedIndex's "original" Index. + * Input for question answering correctness metric. */ - index?: string | null; + questionAnsweringCorrectnessInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput; /** - * Output only. The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the Index.update_time of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must list the operations that are running on the original Index. Only the successfully completed Operations with update_time equal or before this sync time are contained in this DeployedIndex. + * Input for question answering helpfulness metric. */ - indexSyncTime?: string | null; + questionAnsweringHelpfulnessInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput; /** - * Output only. Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if network is configured. + * Input for question answering quality metric. */ - privateEndpoints?: Schema$GoogleCloudAiplatformV1beta1IndexPrivateEndpoints; + questionAnsweringQualityInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput; /** - * Optional. A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. 
The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. + * Input for question answering relevance metric. */ - reservedIpRanges?: string[] | null; - } - /** - * Used to set up the auth on the DeployedIndex's private endpoint. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfig { + questionAnsweringRelevanceInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput; /** - * Defines the authentication provider that the DeployedIndex uses. + * Instances and metric spec for rouge metric. */ - authProvider?: Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfigAuthProvider; - } - /** - * Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndexAuthConfigAuthProvider { + rougeInput?: Schema$GoogleCloudAiplatformV1beta1RougeInput; /** - * A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: `service-account-name@project-id.iam.gserviceaccount.com` + * Input for safety metric. */ - allowedIssuers?: string[] | null; + safetyInput?: Schema$GoogleCloudAiplatformV1beta1SafetyInput; /** - * The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. + * Input for summarization helpfulness metric. */ - audiences?: string[] | null; - } - /** - * Points to a DeployedIndex. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployedIndexRef { + summarizationHelpfulnessInput?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput; /** - * Immutable. The ID of the DeployedIndex in the above IndexEndpoint. + * Input for summarization quality metric. */ - deployedIndexId?: string | null; + summarizationQualityInput?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInput; /** - * Output only. The display name of the DeployedIndex. + * Input for summarization verbosity metric. */ - displayName?: string | null; + summarizationVerbosityInput?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInput; /** - * Immutable. A resource name of the IndexEndpoint. + * Tool call metric instances. Input for tool call valid metric. */ - indexEndpoint?: string | null; + toolCallValidInput?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidInput; + /** + * Input for tool name match metric. + */ + toolNameMatchInput?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInput; + /** + * Input for tool parameter key match metric. + */ + toolParameterKeyMatchInput?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput; + /** + * Input for tool parameter key value match metric. + */ + toolParameterKvMatchInput?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput; } /** - * A deployment of a Model. Endpoints contain one or more DeployedModels. + * Response message for EvaluationService.EvaluateInstances. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployedModel { + export interface Schema$GoogleCloudAiplatformV1beta1EvaluateInstancesResponse { /** - * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. + * Results for bleu metric. */ - automaticResources?: Schema$GoogleCloudAiplatformV1beta1AutomaticResources; + bleuResults?: Schema$GoogleCloudAiplatformV1beta1BleuResults; /** - * Output only. 
Timestamp when the DeployedModel was created. + * Result for coherence metric. */ - createTime?: string | null; + coherenceResult?: Schema$GoogleCloudAiplatformV1beta1CoherenceResult; /** - * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. + * Auto metric evaluation results. Results for exact match metric. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; + exactMatchResults?: Schema$GoogleCloudAiplatformV1beta1ExactMatchResults; /** - * If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. + * LLM-based metric evaluation result. General text generation metrics, applicable to other categories. Result for fluency metric. */ - disableExplanations?: boolean | null; + fluencyResult?: Schema$GoogleCloudAiplatformV1beta1FluencyResult; /** - * The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. + * Result for fulfillment metric. */ - displayName?: string | null; + fulfillmentResult?: Schema$GoogleCloudAiplatformV1beta1FulfillmentResult; /** - * If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. + * Result for groundedness metric. */ - enableAccessLogging?: boolean | null; + groundednessResult?: Schema$GoogleCloudAiplatformV1beta1GroundednessResult; /** - * If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging. Only supported for custom-trained Models and AutoML Tabular Models. + * Result for pairwise question answering quality metric. 
*/ - enableContainerLogging?: boolean | null; + pairwiseQuestionAnsweringQualityResult?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult; /** - * Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. + * Result for pairwise summarization quality metric. */ - explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; + pairwiseSummarizationQualityResult?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult; /** - * Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are `/[0-9]/`. + * Result for question answering correctness metric. */ - id?: string | null; + questionAnsweringCorrectnessResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult; /** - * Required. The resource name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. The resource name may contain version id or version alias to specify the version. Example: `projects/{project\}/locations/{location\}/models/{model\}@2` or `projects/{project\}/locations/{location\}/models/{model\}@golden` if no version is specified, the default version will be deployed. + * Result for question answering helpfulness metric. */ - model?: string | null; + questionAnsweringHelpfulnessResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult; /** - * Output only. 
The version ID of the model that is deployed. + * Question answering only metrics. Result for question answering quality metric. */ - modelVersionId?: string | null; + questionAnsweringQualityResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult; /** - * Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured. + * Result for question answering relevance metric. */ - privateEndpoints?: Schema$GoogleCloudAiplatformV1beta1PrivateEndpoints; + questionAnsweringRelevanceResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult; /** - * The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + * Results for rouge metric. */ - serviceAccount?: string | null; + rougeResults?: Schema$GoogleCloudAiplatformV1beta1RougeResults; /** - * The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` + * Result for safety metric. */ - sharedResources?: string | null; - } - /** - * Points to a DeployedModel. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployedModelRef { + safetyResult?: Schema$GoogleCloudAiplatformV1beta1SafetyResult; /** - * Immutable. An ID of a DeployedModel in the above Endpoint. + * Result for summarization helpfulness metric. */ - deployedModelId?: string | null; + summarizationHelpfulnessResult?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult; /** - * Immutable. A resource name of an Endpoint. + * Summarization only metrics. 
Result for summarization quality metric. */ - endpoint?: string | null; - } - /** - * Runtime operation information for IndexEndpointService.DeployIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployIndexOperationMetadata { + summarizationQualityResult?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualityResult; /** - * The unique index id specified by user + * Result for summarization verbosity metric. */ - deployedIndexId?: string | null; + summarizationVerbosityResult?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityResult; /** - * The operation generic information. + * Tool call metrics. Results for tool call valid metric. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for IndexEndpointService.DeployIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployIndexRequest { + toolCallValidResults?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidResults; /** - * Required. The DeployedIndex to be created within the IndexEndpoint. + * Results for tool name match metric. */ - deployedIndex?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex; - } - /** - * Response message for IndexEndpointService.DeployIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployIndexResponse { + toolNameMatchResults?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchResults; /** - * The DeployedIndex that had been deployed in the IndexEndpoint. + * Results for tool parameter key match metric. */ - deployedIndex?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex; + toolParameterKeyMatchResults?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults; + /** + * Results for tool parameter key value match metric. 
+ */ + toolParameterKvMatchResults?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults; } /** - * A description of resources that can be shared by multiple DeployedModels, whose underlying specification consists of a DedicatedResources. + * An edge describing the relationship between an Artifact and an Execution in a lineage graph. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeploymentResourcePool { + export interface Schema$GoogleCloudAiplatformV1beta1Event { /** - * Output only. Timestamp when this DeploymentResourcePool was created. + * Required. The relative resource name of the Artifact in the Event. */ - createTime?: string | null; + artifact?: string | null; /** - * Required. The underlying DedicatedResources that the DeploymentResourcePool uses. + * Output only. Time the Event occurred. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; + eventTime?: string | null; /** - * Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` + * Output only. The relative resource name of the Execution in the Event. */ - name?: string | null; + execution?: string | null; + /** + * The labels with user-defined metadata to annotate Events. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Event (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + */ + labels?: {[key: string]: string} | null; + /** + * Required. The type of the Event. + */ + type?: string | null; } /** - * Runtime operation information for EndpointService.DeployModel. + * Input for exact match metric. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1DeployModelOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchInput { /** - * The operation generic information. + * Required. Repeated exact match instances. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + instances?: Schema$GoogleCloudAiplatformV1beta1ExactMatchInstance[]; + /** + * Required. Spec for exact match metric. + */ + metricSpec?: Schema$GoogleCloudAiplatformV1beta1ExactMatchSpec; } /** - * Request message for EndpointService.DeployModel. + * Spec for exact match instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeployModelRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchInstance { /** - * Required. The DeployedModel to be created within the Endpoint. Note that Endpoint.traffic_split must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via EndpointService.UpdateEndpoint. + * Required. Output of the evaluated model. */ - deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; + prediction?: string | null; /** - * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's traffic_split will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's traffic_split is not updated. + * Required. Ground truth used to compare against the prediction. */ - trafficSplit?: {[key: string]: number} | null; + reference?: string | null; } /** - * Response message for EndpointService.DeployModel. + * Exact match metric value for an instance. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1DeployModelResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchMetricValue { /** - * The DeployedModel that had been deployed in the Endpoint. + * Output only. Exact match score. */ - deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; + score?: number | null; } /** - * Runtime operation information for SolverService.DeploySolver. + * Results for exact match metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1DeploySolverOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchResults { /** - * The generic operation information. + * Output only. Exact match metric values. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + exactMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ExactMatchMetricValue[]; } - export interface Schema$GoogleCloudAiplatformV1beta1DestinationFeatureSetting { + /** + * Spec for exact match metric - returns 1 if prediction and reference exactly matches, otherwise 0. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchSpec {} + /** + * Example-based explainability that returns the nearest neighbors from the provided dataset. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Examples { /** - * Specify the field name in the export destination. If not specified, Feature ID is used. + * The Cloud Storage input instances. */ - destinationField?: string | null; + exampleGcsSource?: Schema$GoogleCloudAiplatformV1beta1ExamplesExampleGcsSource; /** - * Required. The ID of the Feature to apply the setting to. + * The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search. */ - featureId?: string | null; - } - /** - * Request message for PredictionService.DirectPredict. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1DirectPredictRequest { + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; /** - * The prediction input. + * The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). */ - inputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; + nearestNeighborSearchConfig?: any | null; /** - * The parameters that govern the prediction. + * The number of neighbors to return when querying for examples. */ - parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; + neighborCount?: number | null; + /** + * Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality. + */ + presets?: Schema$GoogleCloudAiplatformV1beta1Presets; } /** - * Response message for PredictionService.DirectPredict. + * The Cloud Storage input instances. */ - export interface Schema$GoogleCloudAiplatformV1beta1DirectPredictResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ExamplesExampleGcsSource { /** - * The prediction output. + * The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. */ - outputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; + dataFormat?: string | null; /** - * The parameters that govern the prediction. + * The Cloud Storage location for the input instances. */ - parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; } /** - * Request message for PredictionService.DirectRawPredict. + * Overrides for example-based explanations. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1DirectRawPredictRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ExamplesOverride { /** - * The prediction input. + * The number of neighbors to return that have the same crowding tag. */ - input?: string | null; + crowdingCount?: number | null; /** - * Fully qualified name of the API method being invoked to perform predictions. Format: `/namespace.Service/Method/` Example: `/tensorflow.serving.PredictionService/Predict` + * The format of the data being provided with each call. */ - methodName?: string | null; - } - /** - * Response message for PredictionService.DirectRawPredict. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DirectRawPredictResponse { + dataFormat?: string | null; /** - * The prediction output. + * The number of neighbors to return. */ - output?: string | null; + neighborCount?: number | null; + /** + * Restrict the resulting nearest neighbors to respect these constraints. + */ + restrictions?: Schema$GoogleCloudAiplatformV1beta1ExamplesRestrictionsNamespace[]; + /** + * If true, return the embeddings instead of neighbors. + */ + returnEmbeddings?: boolean | null; } /** - * The input content is encapsulated and uploaded in the request. - */ - export interface Schema$GoogleCloudAiplatformV1beta1DirectUploadSource {} - /** - * Represents the spec of disk options. + * Restrictions namespace for example-based explanations overrides. */ - export interface Schema$GoogleCloudAiplatformV1beta1DiskSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ExamplesRestrictionsNamespace { /** - * Size in GB of the boot disk (default is 100GB). + * The list of allowed tags. */ - bootDiskSizeGb?: number | null; + allow?: string[] | null; /** - * Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + * The list of deny tags. 
*/ - bootDiskType?: string | null; + deny?: string[] | null; + /** + * The namespace name. + */ + namespaceName?: string | null; } /** - * A list of double values. + * Request message for ExtensionExecutionService.ExecuteExtension. */ - export interface Schema$GoogleCloudAiplatformV1beta1DoubleArray { + export interface Schema$GoogleCloudAiplatformV1beta1ExecuteExtensionRequest { /** - * A list of double values. + * Required. The desired ID of the operation to be executed in this extension as defined in ExtensionOperation.operation_id. */ - values?: number[] | null; + operationId?: string | null; + /** + * Optional. Request parameters that will be used for executing this operation. The struct should be in a form of map with param name as the key and actual param value as the value. E.g. If this operation requires a param "name" to be set to "abc". you can set this to something like {"name": "abc"\}. + */ + operationParams?: {[key: string]: any} | null; + /** + * Optional. Auth config provided at runtime to override the default value in Extension.manifest.auth_config. The AuthConfig.auth_type should match the value in Extension.manifest.auth_config. + */ + runtimeAuthConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfig; } /** - * Represents a customer-managed encryption key spec that can be applied to a top-level resource. + * Response message for ExtensionExecutionService.ExecuteExtension. */ - export interface Schema$GoogleCloudAiplatformV1beta1EncryptionSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ExecuteExtensionResponse { /** - * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + * Response content from the extension. 
The content should be conformant to the response.content schema in the extension's manifest/OpenAPI spec. */ - kmsKeyName?: string | null; + content?: string | null; } /** - * Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. + * Instance of a general execution. */ - export interface Schema$GoogleCloudAiplatformV1beta1Endpoint { + export interface Schema$GoogleCloudAiplatformV1beta1Execution { /** - * Output only. Timestamp when this Endpoint was created. + * Output only. Timestamp when this Execution was created. */ createTime?: string | null; /** - * Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. - */ - deployedModels?: Schema$GoogleCloudAiplatformV1beta1DeployedModel[]; - /** - * The description of the Endpoint. + * Description of the Execution */ description?: string | null; /** - * Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * User provided display name of the Execution. May be up to 128 Unicode characters. */ displayName?: string | null; /** - * Deprecated: If true, expose the Endpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. - */ - enablePrivateServiceConnect?: boolean | null; - /** - * Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. - */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; - /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ etag?: string | null; /** - * The labels with user-defined metadata to organize your Endpoints. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). */ labels?: {[key: string]: string} | null; /** - * Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by JobService.CreateModelDeploymentMonitoringJob. Format: `projects/{project\}/locations/{location\}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job\}` + * Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. The size of this field should not exceed 200KB. */ - modelDeploymentMonitoringJob?: string | null; + metadata?: {[key: string]: any} | null; /** - * Output only. The resource name of the Endpoint. + * Output only. The resource name of the Execution. */ name?: string | null; /** - * Optional. The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project\}/global/networks/{network\}`. Where `{project\}` is a project number, as in `12345`, and `{network\}` is network name. 
- */ - network?: string | null; - /** - * Configures the request-response logging for online prediction. + * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - predictRequestResponseLoggingConfig?: Schema$GoogleCloudAiplatformV1beta1PredictRequestResponseLoggingConfig; + schemaTitle?: string | null; /** - * Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive. + * The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. */ - privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig; + schemaVersion?: string | null; /** - * A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If a DeployedModel's ID is not listed in this map, then it receives no traffic. The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment. + * The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. */ - trafficSplit?: {[key: string]: number} | null; + state?: string | null; /** - * Output only. Timestamp when this Endpoint was last updated. + * Output only. Timestamp when this Execution was last updated. */ updateTime?: string | null; } /** - * Selector for entityId. Getting ids from the given source. + * Request message for PredictionService.Explain. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1EntityIdSelector { + export interface Schema$GoogleCloudAiplatformV1beta1ExplainRequest { /** - * Source of Csv + * Optional. This field is the same as the one above, but supports multiple explanations to occur in parallel. The key can be any string. Each override will be run against the model, then its explanations will be grouped together. Note - these explanations are run **In Addition** to the default Explanation in the deployed model. */ - csvSource?: Schema$GoogleCloudAiplatformV1beta1CsvSource; + concurrentExplanationSpecOverride?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1ExplanationSpecOverride; + } | null; /** - * Source column that holds entity IDs. If not provided, entity IDs are extracted from the column named entity_id. + * If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding Endpoint.traffic_split. */ - entityIdField?: string | null; - } - /** - * An entity type is a type of object in a system that needs to be modeled and have stored information about. For example, driver is an entity type, and driver0 is an instance of an entity type driver. - */ - export interface Schema$GoogleCloudAiplatformV1beta1EntityType { + deployedModelId?: string | null; /** - * Output only. Timestamp when this EntityType was created. + * If specified, overrides the explanation_spec of the DeployedModel. Can be used for explaining prediction results with different configurations, such as: - Explaining top-5 predictions results as opposed to top-1; - Increasing path count or step count of the attribution methods to reduce approximate errors; - Using different baselines for explaining the prediction results. */ - createTime?: string | null; + explanationSpecOverride?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpecOverride; /** - * Optional. Description of the EntityType. + * Required. The instances that are the input to the explanation call. 
A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the explanation call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. */ - description?: string | null; + instances?: any[] | null; /** - * Optional. Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri. */ - etag?: string | null; + parameters?: any | null; + } + /** + * Response message for PredictionService.Explain. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplainResponse { /** - * Optional. The labels with user-defined metadata to organize your EntityTypes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one EntityType (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * This field stores the results of the explanations run in parallel with The default explanation strategy/method. */ - labels?: {[key: string]: string} | null; + concurrentExplanations?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ExplainResponseConcurrentExplanation; + } | null; /** - * Optional. The default monitoring configuration for all Features with value type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 under this EntityType. 
If this is populated with [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is disabled. + * ID of the Endpoint's DeployedModel that served this explanation. */ - monitoringConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfig; + deployedModelId?: string | null; /** - * Immutable. Name of the EntityType. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. + * The explanations of the Model's PredictResponse.predictions. It has the same number of elements as instances to be explained. */ - name?: string | null; + explanations?: Schema$GoogleCloudAiplatformV1beta1Explanation[]; /** - * Optional. Config for data retention policy in offline storage. TTL in days for feature values that will be stored in offline storage. The Feature Store offline storage periodically removes obsolete feature values older than `offline_storage_ttl_days` since the feature generation time. If unset (or explicitly set to 0), default to 4000 days TTL. + * The predictions that are the output of the predictions call. Same as PredictResponse.predictions. */ - offlineStorageTtlDays?: number | null; + predictions?: any[] | null; + } + /** + * This message is a wrapper grouping Concurrent Explanations. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplainResponseConcurrentExplanation { /** - * Output only. Timestamp when this EntityType was most recently updated. + * The explanations of the Model's PredictResponse.predictions. It has the same number of elements as instances to be explained. 
*/ - updateTime?: string | null; + explanations?: Schema$GoogleCloudAiplatformV1beta1Explanation[]; } /** - * Represents an environment variable present in a Container or Python Module. + * Explanation of a prediction (provided in PredictResponse.predictions) produced by the Model on a given instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1EnvVar { + export interface Schema$GoogleCloudAiplatformV1beta1Explanation { /** - * Required. Name of the environment variable. Must be a valid C identifier. + * Output only. Feature attributions grouped by predicted outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. By default, we provide Shapley values for the predicted class. However, you can configure the explanation request to generate Shapley values for any other classes too. For example, if a model predicts a probability of `0.4` for approving a loan application, the model's decision is to reject the application since `p(reject) = 0.6 \> p(approve) = 0.4`, and the default Shapley values would be computed for rejection decision and not approval, even though the latter might be the positive class. If users set ExplanationParameters.top_k, the attributions are sorted by instance_output_value in descending order. If ExplanationParameters.output_indices is specified, the attributions are stored by Attribution.output_index in the same order as they appear in the output_indices. */ - name?: string | null; + attributions?: Schema$GoogleCloudAiplatformV1beta1Attribution[]; /** - * Required. 
Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. + * Output only. List of the nearest neighbors for example-based explanations. For models deployed with the examples explanations feature enabled, the attributions field is empty and instead the neighbors field is populated. */ - value?: string | null; + neighbors?: Schema$GoogleCloudAiplatformV1beta1Neighbor[]; } /** - * Model error analysis for each annotation. + * Metadata describing the Model's input and output for explanation. */ - export interface Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotation { + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadata { /** - * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. + * Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - attributedItems?: Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotationAttributedItem[]; + featureAttributionsSchemaUri?: string | null; /** - * The outlier score of this annotated item. Usually defined as the min of all distances from attributed items. + * Required. Map from feature names to feature input metadata. 
Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. */ - outlierScore?: number | null; + inputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadata; + } | null; /** - * The threshold used to determine if this annotation is an outlier or not. + * Name of the source to generate embeddings for example based explanations. */ - outlierThreshold?: number | null; + latentSpaceSource?: string | null; /** - * The query type used for finding the attributed items. + * Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. */ - queryType?: string | null; + outputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOutputMetadata; + } | null; } /** - * Attributed items for a given annotation, typically representing neighbors from the training sets constrained by the query type. + * Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. */ - export interface Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotationAttributedItem { + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadata { /** - * The unique ID for each annotation. 
Used by FE to allocate the annotation in DB. + * Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ - annotationResourceName?: string | null; + denseShapeTensorName?: string | null; /** - * The distance of this item to the annotation. + * A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor. */ - distance?: number | null; - } - /** - * True positive, false positive, or false negative. EvaluatedAnnotation is only available under ModelEvaluationSlice with slice of `annotationSpec` dimension. - */ - export interface Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotation { + encodedBaselines?: any[] | null; /** - * Output only. The data item payload that the Model predicted this EvaluatedAnnotation on. + * Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table. */ - dataItemPayload?: any | null; + encodedTensorName?: string | null; /** - * Annotations of model error analysis results. + * Defines how the feature is encoded into the input tensor. Defaults to IDENTITY. */ - errorAnalysisAnnotations?: Schema$GoogleCloudAiplatformV1beta1ErrorAnalysisAnnotation[]; + encoding?: string | null; /** - * Output only. ID of the EvaluatedDataItemView under the same ancestor ModelEvaluation. The EvaluatedDataItemView consists of all ground truths and predictions on data_item_payload. + * The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized. 
*/ - evaluatedDataItemViewId?: string | null; + featureValueDomain?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataFeatureValueDomain; /** - * Explanations of predictions. Each element of the explanations indicates the explanation for one explanation Method. The attributions list in the EvaluatedAnnotationExplanation.explanation object corresponds to the predictions list. For example, the second element in the attributions list explains the second element in the predictions list. + * Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name. */ - explanations?: Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotationExplanation[]; + groupName?: string | null; /** - * Output only. The ground truth Annotations, i.e. the Annotations that exist in the test data the Model is evaluated on. For true positive, there is one and only one ground truth annotation, which matches the only prediction in predictions. For false positive, there are zero or more ground truth annotations that are similar to the only prediction in predictions, but not enough for a match. For false negative, there is one and only one ground truth annotation, which doesn't match any predictions created by the model. The schema of the ground truth is stored in ModelEvaluation.annotation_schema_uri + * A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. */ - groundTruths?: any[] | null; + indexFeatureMapping?: string[] | null; /** - * Output only. The model predicted annotations. For true positive, there is one and only one prediction, which matches the only one ground truth annotation in ground_truths. 
For false positive, there is one and only one prediction, which doesn't match any ground truth annotation of the corresponding data_item_view_id. For false negative, there are zero or more predictions which are similar to the only ground truth annotation in ground_truths but not enough for a match. The schema of the prediction is stored in ModelEvaluation.annotation_schema_uri + * Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ - predictions?: any[] | null; + indicesTensorName?: string | null; /** - * Output only. Type of the EvaluatedAnnotation. + * Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature's input in the instance[]. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. */ - type?: string | null; - } - /** - * Explanation result of the prediction produced by the Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1EvaluatedAnnotationExplanation { + inputBaselines?: any[] | null; /** - * Explanation attribution response details. + * Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow. */ - explanation?: Schema$GoogleCloudAiplatformV1beta1Explanation; + inputTensorName?: string | null; /** - * Explanation type. 
For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` + * Modality of the feature. Valid values are: numeric, image. Defaults to numeric. */ - explanationType?: string | null; + modality?: string | null; + /** + * Visualization configurations for image explanation. + */ + visualization?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataVisualization; } /** - * Request message for EvaluationService.EvaluateInstances. + * Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. */ - export interface Schema$GoogleCloudAiplatformV1beta1EvaluateInstancesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataFeatureValueDomain { /** - * Instances and metric spec for bleu metric. + * The maximum permissible value for this feature. */ - bleuInput?: Schema$GoogleCloudAiplatformV1beta1BleuInput; + maxValue?: number | null; /** - * Input for coherence metric. + * The minimum permissible value for this feature. */ - coherenceInput?: Schema$GoogleCloudAiplatformV1beta1CoherenceInput; + minValue?: number | null; /** - * Auto metric instances. Instances and metric spec for exact match metric. + * If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization. */ - exactMatchInput?: Schema$GoogleCloudAiplatformV1beta1ExactMatchInput; + originalMean?: number | null; /** - * LLM-based metric instance. 
General text generation metrics, applicable to other categories. Input for fluency metric. + * If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization. */ - fluencyInput?: Schema$GoogleCloudAiplatformV1beta1FluencyInput; + originalStddev?: number | null; + } + /** + * Visualization configurations for image explanation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataVisualization { /** - * Input for fulfillment metric. + * Excludes attributions below the specified percentile, from the highlighted areas. Defaults to 62. */ - fulfillmentInput?: Schema$GoogleCloudAiplatformV1beta1FulfillmentInput; + clipPercentLowerbound?: number | null; /** - * Input for groundedness metric. + * Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9. */ - groundednessInput?: Schema$GoogleCloudAiplatformV1beta1GroundednessInput; + clipPercentUpperbound?: number | null; /** - * Input for pairwise question answering quality metric. + * The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue. */ - pairwiseQuestionAnsweringQualityInput?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput; + colorMap?: string | null; /** - * Input for pairwise summarization quality metric. + * How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. 
Defaults to NONE. */ - pairwiseSummarizationQualityInput?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput; + overlayType?: string | null; /** - * Input for question answering correctness metric. + * Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE. */ - questionAnsweringCorrectnessInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput; + polarity?: string | null; /** - * Input for question answering helpfulness metric. + * Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES. */ - questionAnsweringHelpfulnessInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput; + type?: string | null; + } + /** + * Metadata of the prediction output to be explained. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOutputMetadata { /** - * Input for question answering quality metric. + * Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape of the outputs, so that it can be located by Attribution.output_index for a specific output. */ - questionAnsweringQualityInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput; + displayNameMappingKey?: string | null; /** - * Input for question answering relevance metric. + * Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It's not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sort the outputs by their values. The shape of the value must be an n-dimensional array of strings. 
The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index. */ - questionAnsweringRelevanceInput?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput; + indexDisplayNameMapping?: any | null; /** - * Instances and metric spec for rouge metric. + * Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow. */ - rougeInput?: Schema$GoogleCloudAiplatformV1beta1RougeInput; + outputTensorName?: string | null; + } + /** + * The ExplanationMetadata entries that can be overridden at online explanation time. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverride { /** - * Input for safety metric. + * Required. Overrides the input metadata of the features. The key is the name of the feature to be overridden. The keys specified here must exist in the input metadata to be overridden. If a feature is not specified here, the corresponding feature's input metadata is not overridden. */ - safetyInput?: Schema$GoogleCloudAiplatformV1beta1SafetyInput; + inputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverrideInputMetadataOverride; + } | null; + } + /** + * The input metadata entries to be overridden. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverrideInputMetadataOverride { /** - * Input for summarization helpfulness metric. + * Baseline inputs for this feature. This overrides the `input_baseline` field of the ExplanationMetadata.InputMetadata object of the corresponding feature's input metadata. If it's not specified, the original baselines are not overridden. */ - summarizationHelpfulnessInput?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput; + inputBaselines?: any[] | null; + } + /** + * Parameters to configure explaining for Model's predictions. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationParameters { /** - * Input for summarization quality metric. + * Example-based explanations that returns the nearest neighbors from the provided dataset. */ - summarizationQualityInput?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInput; + examples?: Schema$GoogleCloudAiplatformV1beta1Examples; /** - * Input for summarization verbosity metric. + * An attribution method that computes Aumann-Shapley values taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 */ - summarizationVerbosityInput?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInput; + integratedGradientsAttribution?: Schema$GoogleCloudAiplatformV1beta1IntegratedGradientsAttribution; /** - * Tool call metric instances. Input for tool call valid metric. + * If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). */ - toolCallValidInput?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidInput; + outputIndices?: any[] | null; /** - * Input for tool name match metric. + * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for model details: https://arxiv.org/abs/1306.4265. 
*/ - toolNameMatchInput?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInput; + sampledShapleyAttribution?: Schema$GoogleCloudAiplatformV1beta1SampledShapleyAttribution; /** - * Input for tool parameter key match metric. + * If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predicts more than one outputs (e,g, multi-class Models). When set to -1, returns explanations for all outputs. */ - toolParameterKeyMatchInput?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput; + topK?: number | null; /** - * Input for tool parameter key value match metric. + * An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead. */ - toolParameterKvMatchInput?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput; + xraiAttribution?: Schema$GoogleCloudAiplatformV1beta1XraiAttribution; } /** - * Response message for EvaluationService.EvaluateInstances. + * Specification of Model explanation. */ - export interface Schema$GoogleCloudAiplatformV1beta1EvaluateInstancesResponse { - /** - * Results for bleu metric. - */ - bleuResults?: Schema$GoogleCloudAiplatformV1beta1BleuResults; + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationSpec { /** - * Result for coherence metric. + * Optional. Metadata describing the Model's input and output for explanation. */ - coherenceResult?: Schema$GoogleCloudAiplatformV1beta1CoherenceResult; + metadata?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadata; /** - * Auto metric evaluation results. 
Results for exact match metric. + * Required. Parameters that configure explaining of the Model's predictions. */ - exactMatchResults?: Schema$GoogleCloudAiplatformV1beta1ExactMatchResults; + parameters?: Schema$GoogleCloudAiplatformV1beta1ExplanationParameters; + } + /** + * The ExplanationSpec entries that can be overridden at online explanation time. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExplanationSpecOverride { /** - * LLM-based metric evaluation result. General text generation metrics, applicable to other categories. Result for fluency metric. + * The example-based explanations parameter overrides. */ - fluencyResult?: Schema$GoogleCloudAiplatformV1beta1FluencyResult; + examplesOverride?: Schema$GoogleCloudAiplatformV1beta1ExamplesOverride; /** - * Result for fulfillment metric. + * The metadata to be overridden. If not specified, no metadata is overridden. */ - fulfillmentResult?: Schema$GoogleCloudAiplatformV1beta1FulfillmentResult; + metadata?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverride; /** - * Result for groundedness metric. + * The parameters to be overridden. Note that the attribution method cannot be changed. If not specified, no parameter is overridden. */ - groundednessResult?: Schema$GoogleCloudAiplatformV1beta1GroundednessResult; + parameters?: Schema$GoogleCloudAiplatformV1beta1ExplanationParameters; + } + /** + * Describes what part of the Dataset is to be exported, the destination of the export and how to export. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportDataConfig { /** - * Result for pairwise question answering quality metric. + * An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations. 
*/ - pairwiseQuestionAnsweringQualityResult?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult; + annotationsFilter?: string | null; /** - * Result for pairwise summarization quality metric. + * Split based on fractions defining the size of each set. */ - pairwiseSummarizationQualityResult?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult; + fractionSplit?: Schema$GoogleCloudAiplatformV1beta1ExportFractionSplit; /** - * Result for question answering correctness metric. + * The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format. */ - questionAnsweringCorrectnessResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult; + gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + } + /** + * Runtime operation information for DatasetService.ExportData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportDataOperationMetadata { /** - * Result for question answering helpfulness metric. + * A Google Cloud Storage directory which path ends with '/'. The exported data is stored in the directory. */ - questionAnsweringHelpfulnessResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult; + gcsOutputDirectory?: string | null; /** - * Question answering only metrics. Result for question answering quality metric. + * The common part of the operation metadata. 
*/ - questionAnsweringQualityResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Request message for DatasetService.ExportData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportDataRequest { /** - * Result for question answering relevance metric. + * Required. The desired output location. */ - questionAnsweringRelevanceResult?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult; + exportConfig?: Schema$GoogleCloudAiplatformV1beta1ExportDataConfig; + } + /** + * Response message for DatasetService.ExportData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportDataResponse { /** - * Results for rouge metric. + * All of the files that are exported in this export operation. For custom code training export, only three (training, validation and test) Cloud Storage paths in wildcard format are populated (for example, gs://.../training-*). */ - rougeResults?: Schema$GoogleCloudAiplatformV1beta1RougeResults; + exportedFiles?: string[] | null; + } + /** + * Details of operations that exports Features values. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesOperationMetadata { /** - * Result for safety metric. + * Operation metadata for Featurestore export Feature values. */ - safetyResult?: Schema$GoogleCloudAiplatformV1beta1SafetyResult; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Request message for FeaturestoreService.ExportFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequest { /** - * Result for summarization helpfulness metric. + * Required. Specifies destination location and format. 
*/ - summarizationHelpfulnessResult?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult; + destination?: Schema$GoogleCloudAiplatformV1beta1FeatureValueDestination; /** - * Summarization only metrics. Result for summarization quality metric. + * Required. Selects Features to export values of. */ - summarizationQualityResult?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualityResult; + featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; /** - * Result for summarization verbosity metric. + * Exports all historical values of all entities of the EntityType within a time range */ - summarizationVerbosityResult?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityResult; + fullExport?: Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestFullExport; /** - * Tool call metrics. Results for tool call valid metric. + * Per-Feature export settings. */ - toolCallValidResults?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidResults; + settings?: Schema$GoogleCloudAiplatformV1beta1DestinationFeatureSetting[]; /** - * Results for tool name match metric. + * Exports the latest Feature values of all entities of the EntityType within a time range. */ - toolNameMatchResults?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchResults; + snapshotExport?: Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestSnapshotExport; + } + /** + * Describes exporting all historical Feature values of all entities of the EntityType between [start_time, end_time]. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestFullExport { /** - * Results for tool parameter key match metric. + * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. 
*/ - toolParameterKeyMatchResults?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults; + endTime?: string | null; /** - * Results for tool parameter key value match metric. + * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. */ - toolParameterKvMatchResults?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults; + startTime?: string | null; } /** - * An edge describing the relationship between an Artifact and an Execution in a lineage graph. + * Describes exporting the latest Feature values of all entities of the EntityType between [start_time, snapshot_time]. */ - export interface Schema$GoogleCloudAiplatformV1beta1Event { + export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestSnapshotExport { /** - * Required. The relative resource name of the Artifact in the Event. + * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. */ - artifact?: string | null; + snapshotTime?: string | null; /** - * Output only. Time the Event occurred. + * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. */ - eventTime?: string | null; + startTime?: string | null; + } + /** + * Response message for FeaturestoreService.ExportFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesResponse {} + /** + * Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. 
If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExportFractionSplit { /** - * Output only. The relative resource name of the Execution in the Event. + * The fraction of the input data that is to be used to evaluate the Model. */ - execution?: string | null; + testFraction?: number | null; /** - * The labels with user-defined metadata to annotate Events. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Event (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * The fraction of the input data that is to be used to train the Model. */ - labels?: {[key: string]: string} | null; + trainingFraction?: number | null; /** - * Required. The type of the Event. + * The fraction of the input data that is to be used to validate the Model. */ - type?: string | null; + validationFraction?: number | null; } /** - * Input for exact match metric. + * Details of ModelService.ExportModel operation. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchInput { + export interface Schema$GoogleCloudAiplatformV1beta1ExportModelOperationMetadata { /** - * Required. Repeated exact match instances. + * The common part of the operation metadata. */ - instances?: Schema$GoogleCloudAiplatformV1beta1ExactMatchInstance[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Required. Spec for exact match metric. + * Output only. 
Information further describing the output of this Model export. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1ExactMatchSpec; + outputInfo?: Schema$GoogleCloudAiplatformV1beta1ExportModelOperationMetadataOutputInfo; } /** - * Spec for exact match instance. + * Further describes the output of the ExportModel. Supplements ExportModelRequest.OutputConfig. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchInstance { + export interface Schema$GoogleCloudAiplatformV1beta1ExportModelOperationMetadataOutputInfo { /** - * Required. Output of the evaluated model. + * Output only. If the Model artifact is being exported to Google Cloud Storage this is the full path of the directory created, into which the Model files are being written to. */ - prediction?: string | null; + artifactOutputUri?: string | null; /** - * Required. Ground truth used to compare against the prediction. + * Output only. If the Model image is being exported to Google Container Registry or Artifact Registry this is the full path of the image created. */ - reference?: string | null; + imageOutputUri?: string | null; } /** - * Exact match metric value for an instance. + * Request message for ModelService.ExportModel. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchMetricValue { + export interface Schema$GoogleCloudAiplatformV1beta1ExportModelRequest { /** - * Output only. Exact match score. + * Required. The desired output location and configuration. */ - score?: number | null; + outputConfig?: Schema$GoogleCloudAiplatformV1beta1ExportModelRequestOutputConfig; } /** - * Results for exact match metric. + * Output configuration for the Model export. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchResults { + export interface Schema$GoogleCloudAiplatformV1beta1ExportModelRequestOutputConfig { /** - * Output only. Exact match metric values. + * The Cloud Storage location where the Model artifact is to be written to. 
Under the directory given as the destination a new one with name "`model-export--`", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside, the Model and any of its supporting files will be written. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `ARTIFACT`. */ - exactMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ExactMatchMetricValue[]; + artifactDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + /** + * The ID of the format in which the Model must be exported. Each Model lists the export formats it supports. If no value is provided here, then the first from the list of the Model's supported formats is used by default. + */ + exportFormatId?: string | null; + /** + * The Google Container Registry or Artifact Registry uri where the Model container image will be copied to. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `IMAGE`. + */ + imageDestination?: Schema$GoogleCloudAiplatformV1beta1ContainerRegistryDestination; } /** - * Spec for exact match metric - returns 1 if prediction and reference exactly matches, otherwise 0. + * Response message of ModelService.ExportModel operation. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExactMatchSpec {} + export interface Schema$GoogleCloudAiplatformV1beta1ExportModelResponse {} /** - * Example-based explainability that returns the nearest neighbors from the provided dataset. + * Request message for TensorboardService.ExportTensorboardTimeSeriesData. */ - export interface Schema$GoogleCloudAiplatformV1beta1Examples { - /** - * The Cloud Storage input instances. 
- */ - exampleGcsSource?: Schema$GoogleCloudAiplatformV1beta1ExamplesExampleGcsSource; + export interface Schema$GoogleCloudAiplatformV1beta1ExportTensorboardTimeSeriesDataRequest { /** - * The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search. + * Exports the TensorboardTimeSeries' data that match the filter expression. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + filter?: string | null; /** - * The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). + * Field to use to sort the TensorboardTimeSeries' data. By default, TensorboardTimeSeries' data is returned in a pseudo random order. */ - nearestNeighborSearchConfig?: any | null; + orderBy?: string | null; /** - * The number of neighbors to return when querying for examples. + * The maximum number of data points to return per page. The default page_size is 1000. Values must be between 1 and 10000. Values above 10000 are coerced to 10000. */ - neighborCount?: number | null; + pageSize?: number | null; /** - * Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality. + * A page token, received from a previous ExportTensorboardTimeSeriesData call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ExportTensorboardTimeSeriesData must match the call that provided the page token. */ - presets?: Schema$GoogleCloudAiplatformV1beta1Presets; + pageToken?: string | null; } /** - * The Cloud Storage input instances. + * Response message for TensorboardService.ExportTensorboardTimeSeriesData. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ExamplesExampleGcsSource { + export interface Schema$GoogleCloudAiplatformV1beta1ExportTensorboardTimeSeriesDataResponse { /** - * The format in which instances are given, if not specified, assume it's JSONL format. Currently only JSONL format is supported. + * A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - dataFormat?: string | null; + nextPageToken?: string | null; /** - * The Cloud Storage location for the input instances. + * The returned time series data points. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + timeSeriesDataPoints?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesDataPoint[]; } /** - * Overrides for example-based explanations. + * Extensions are tools for large language models to access external data, run computations, etc. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExamplesOverride { + export interface Schema$GoogleCloudAiplatformV1beta1Extension { /** - * The number of neighbors to return that have the same crowding tag. + * Output only. Timestamp when this Extension was created. */ - crowdingCount?: number | null; + createTime?: string | null; /** - * The format of the data being provided with each call. + * Optional. The description of the Extension. */ - dataFormat?: string | null; + description?: string | null; /** - * The number of neighbors to return. + * Required. The display name of the Extension. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - neighborCount?: number | null; + displayName?: string | null; /** - * Restrict the resulting nearest neighbors to respect these constraints. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
*/ - restrictions?: Schema$GoogleCloudAiplatformV1beta1ExamplesRestrictionsNamespace[]; + etag?: string | null; /** - * If true, return the embeddings instead of neighbors. + * Output only. Supported operations. */ - returnEmbeddings?: boolean | null; + extensionOperations?: Schema$GoogleCloudAiplatformV1beta1ExtensionOperation[]; + /** + * Required. Manifest of the Extension. + */ + manifest?: Schema$GoogleCloudAiplatformV1beta1ExtensionManifest; + /** + * Identifier. The resource name of the Extension. + */ + name?: string | null; + /** + * Optional. The PrivateServiceConnect config for the extension. If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. + */ + privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1ExtensionPrivateServiceConnectConfig; + /** + * Optional. Runtime config controlling the runtime behavior of this Extension. + */ + runtimeConfig?: Schema$GoogleCloudAiplatformV1beta1RuntimeConfig; + /** + * Optional. Examples to illustrate the usage of the extension as a tool. + */ + toolUseExamples?: Schema$GoogleCloudAiplatformV1beta1ToolUseExample[]; + /** + * Output only. Timestamp when this Extension was most recently updated. + */ + updateTime?: string | null; } /** - * Restrictions namespace for example-based explanations overrides. + * Manifest spec of an Extension needed for runtime execution. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExamplesRestrictionsNamespace { + export interface Schema$GoogleCloudAiplatformV1beta1ExtensionManifest { /** - * The list of allowed tags. + * Required. Immutable. The API specification shown to the LLM. 
*/ - allow?: string[] | null; + apiSpec?: Schema$GoogleCloudAiplatformV1beta1ExtensionManifestApiSpec; /** - * The list of deny tags. + * Required. Immutable. Type of auth supported by this extension. */ - deny?: string[] | null; + authConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfig; /** - * The namespace name. + * Required. The natural language description shown to the LLM. It should describe the usage of the extension, and is essential for the LLM to perform reasoning. */ - namespaceName?: string | null; + description?: string | null; + /** + * Required. Extension name shown to the LLM. The name can be up to 128 characters long. + */ + name?: string | null; } /** - * Request message for ExtensionExecutionService.ExecuteExtension. + * The API specification shown to the LLM. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExecuteExtensionRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ExtensionManifestApiSpec { /** - * Required. The desired ID of the operation to be executed in this extension as defined in ExtensionOperation.operation_id. + * Cloud Storage URI pointing to the OpenAPI spec. */ - operationId?: string | null; + openApiGcsUri?: string | null; /** - * Optional. Request parameters that will be used for executing this operation. The struct should be in a form of map with param name as the key and actual param value as the value. E.g. If this operation requires a param "name" to be set to "abc". you can set this to something like {"name": "abc"\}. + * The API spec in Open API standard and YAML format. */ - operationParams?: {[key: string]: any} | null; + openApiYaml?: string | null; + } + /** + * Operation of an extension. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ExtensionOperation { /** - * Optional. Auth config provided at runtime to override the default value in Extension.manifest.auth_config. The AuthConfig.auth_type should match the value in Extension.manifest.auth_config. + * Output only. 
Structured representation of a function declaration as defined by the OpenAPI Spec. */ - runtimeAuthConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfig; + functionDeclaration?: Schema$GoogleCloudAiplatformV1beta1FunctionDeclaration; + /** + * Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. + */ + operationId?: string | null; } /** - * Response message for ExtensionExecutionService.ExecuteExtension. + * PrivateExtensionConfig configuration for the extension. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExecuteExtensionResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ExtensionPrivateServiceConnectConfig { /** - * Response content from the extension. The content should be conformant to the response.content schema in the extension's manifest/OpenAPI spec. + * Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id\}/locations/{location_id\}/namespaces/{namespace_id\}/services/{service_id\}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. */ - content?: string | null; + serviceDirectory?: string | null; } /** - * Instance of a general execution. + * Feature Metadata information. For example, color is a feature that describes an apple. */ - export interface Schema$GoogleCloudAiplatformV1beta1Execution { + export interface Schema$GoogleCloudAiplatformV1beta1Feature { /** - * Output only. Timestamp when this Execution was created. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). 
Timestamp when this EntityType was created. */ createTime?: string | null; /** - * Description of the Execution + * Description of the Feature. */ description?: string | null; /** - * User provided display name of the Execution. May be up to 128 Unicode characters. + * Optional. Only applicable for Vertex AI Feature Store (Legacy). If not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If set to true, all types of data monitoring are disabled despite the config on EntityType. */ - displayName?: string | null; + disableMonitoring?: boolean | null; /** - * An eTag used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ etag?: string | null; /** - * The labels with user-defined metadata to organize your Executions. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Execution (System labels are excluded). + * Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Feature (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ labels?: {[key: string]: string} | null; /** - * Properties of the Execution. Top level metadata keys' heading and trailing spaces will be trimmed. 
The size of this field should not exceed 200KB. + * Optional. Only applicable for Vertex AI Feature Store (Legacy). Deprecated: The custom monitoring configuration for this Feature, if not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If this is populated with FeaturestoreMonitoringConfig.disabled = true, snapshot analysis monitoring is disabled; if FeaturestoreMonitoringConfig.monitoring_interval specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring config is same as the EntityType's this Feature belongs to. */ - metadata?: {[key: string]: any} | null; + monitoringConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfig; /** - * Output only. The resource name of the Execution. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). A list of historical SnapshotAnalysis stats requested by user, sorted by FeatureStatsAnomaly.start_time descending. */ - name?: string | null; + monitoringStats?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly[]; /** - * The title of the schema describing the metadata. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). The list of historical stats and anomalies with specified objectives. */ - schemaTitle?: string | null; + monitoringStatsAnomalies?: Schema$GoogleCloudAiplatformV1beta1FeatureMonitoringStatsAnomaly[]; /** - * The version of the schema in `schema_title` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as unique identifiers to identify schemas within the local metadata store. + * Immutable. Name of the Feature. 
Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}/features/{feature\}` `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}/features/{feature\}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. */ - schemaVersion?: string | null; + name?: string | null; /** - * The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. + * Entity responsible for maintaining this feature. Can be comma separated list of email addresses or URIs. */ - state?: string | null; + pointOfContact?: string | null; /** - * Output only. Timestamp when this Execution was last updated. + * Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was most recently updated. */ updateTime?: string | null; + /** + * Immutable. Only applicable for Vertex AI Feature Store (Legacy). Type of Feature value. + */ + valueType?: string | null; + /** + * Only applicable for Vertex AI Feature Store. The name of the BigQuery Table/View column hosting data for this version. If no value is provided, will use feature_id. + */ + versionColumnName?: string | null; } /** - * Request message for PredictionService.Explain. + * Vertex AI Feature Group. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplainRequest { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureGroup { /** - * Optional. This field is the same as the one above, but supports multiple explanations to occur in parallel. The key can be any string. 
Each override will be run against the model, then its explanations will be grouped together. Note - these explanations are run **In Addition** to the default Explanation in the deployed model. + * Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source. The BigQuery source table or view must have at least one entity ID column and a column named `feature_timestamp`. */ - concurrentExplanationSpecOverride?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1ExplanationSpecOverride; - } | null; + bigQuery?: Schema$GoogleCloudAiplatformV1beta1FeatureGroupBigQuery; /** - * If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding Endpoint.traffic_split. + * Output only. Timestamp when this FeatureGroup was created. */ - deployedModelId?: string | null; + createTime?: string | null; /** - * If specified, overrides the explanation_spec of the DeployedModel. Can be used for explaining prediction results with different configurations, such as: - Explaining top-5 predictions results as opposed to top-1; - Increasing path count or step count of the attribution methods to reduce approximate errors; - Using different baselines for explaining the prediction results. + * Optional. Description of the FeatureGroup. */ - explanationSpecOverride?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpecOverride; + description?: string | null; /** - * Required. The instances that are the input to the explanation call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the explanation call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. + * Optional. Used to perform consistent read-modify-write updates. 
If not set, a blind "overwrite" update happens. */ - instances?: any[] | null; + etag?: string | null; /** - * The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri. + * Optional. The labels with user-defined metadata to organize your FeatureGroup. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureGroup(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - parameters?: any | null; + labels?: {[key: string]: string} | null; + /** + * Identifier. Name of the FeatureGroup. Format: `projects/{project\}/locations/{location\}/featureGroups/{featureGroup\}` + */ + name?: string | null; + /** + * Output only. Timestamp when this FeatureGroup was last updated. + */ + updateTime?: string | null; } /** - * Response message for PredictionService.Explain. + * Input source type for BigQuery Tables and Views. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplainResponse { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureGroupBigQuery { /** - * This field stores the results of the explanations run in parallel with The default explanation strategy/method. + * Required. Immutable. The BigQuery source URI that points to either a BigQuery Table or View. */ - concurrentExplanations?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ExplainResponseConcurrentExplanation; - } | null; + bigQuerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; /** - * ID of the Endpoint's DeployedModel that served this explanation. + * Optional. Columns to construct entity_id / row keys. 
If not provided defaults to `entity_id`. */ - deployedModelId?: string | null; + entityIdColumns?: string[] | null; + } + /** + * A list of historical SnapshotAnalysis or ImportFeaturesAnalysis stats requested by user, sorted by FeatureStatsAnomaly.start_time descending. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureMonitoringStatsAnomaly { /** - * The explanations of the Model's PredictResponse.predictions. It has the same number of elements as instances to be explained. + * Output only. The stats and anomalies generated at specific timestamp. */ - explanations?: Schema$GoogleCloudAiplatformV1beta1Explanation[]; + featureStatsAnomaly?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly; /** - * The predictions that are the output of the predictions call. Same as PredictResponse.predictions. + * Output only. The objective for each stats. */ - predictions?: any[] | null; + objective?: string | null; } /** - * This message is a wrapper grouping Concurrent Explanations. + * Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplainResponseConcurrentExplanation { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigma { /** - * The explanations of the Model's PredictResponse.predictions. It has the same number of elements as instances to be explained. + * Noise sigma per feature. No noise is added to features that are not set. */ - explanations?: Schema$GoogleCloudAiplatformV1beta1Explanation[]; + noiseSigma?: Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigmaNoiseSigmaForFeature[]; } /** - * Explanation of a prediction (provided in PredictResponse.predictions) produced by the Model on a given instance. + * Noise sigma for a single feature. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1Explanation { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigmaNoiseSigmaForFeature { /** - * Output only. Feature attributions grouped by predicted outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. By default, we provide Shapley values for the predicted class. However, you can configure the explanation request to generate Shapley values for any other classes too. For example, if a model predicts a probability of `0.4` for approving a loan application, the model's decision is to reject the application since `p(reject) = 0.6 \> p(approve) = 0.4`, and the default Shapley values would be computed for rejection decision and not approval, even though the latter might be the positive class. If users set ExplanationParameters.top_k, the attributions are sorted by instance_output_value in descending order. If ExplanationParameters.output_indices is specified, the attributions are stored by Attribution.output_index in the same order as they appear in the output_indices. + * The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. */ - attributions?: Schema$GoogleCloudAiplatformV1beta1Attribution[]; + name?: string | null; /** - * Output only. List of the nearest neighbors for example-based explanations. For models deployed with the examples explanations feature enabled, the attributions field is empty and instead the neighbors field is populated. + * This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. 
Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. */ - neighbors?: Schema$GoogleCloudAiplatformV1beta1Neighbor[]; + sigma?: number | null; } /** - * Metadata describing the Model's input and output for explanation. + * Vertex AI Feature Online Store provides a centralized repository for serving ML features and embedding indexes at low latency. The Feature Online Store is a top-level container. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadata { - /** - * Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. - */ - featureAttributionsSchemaUri?: string | null; - /** - * Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance. - */ - inputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadata; - } | null; + export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStore { /** - * Name of the source to generate embeddings for example based explanations. 
+ * Contains settings for the Cloud Bigtable instance that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. */ - latentSpaceSource?: string | null; + bigtable?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtable; /** - * Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed. + * Output only. Timestamp when this FeatureOnlineStore was created. */ - outputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOutputMetadata; - } | null; - } - /** - * Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadata { + createTime?: string | null; /** - * Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + * Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. */ - denseShapeTensorName?: string | null; + dedicatedServingEndpoint?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreDedicatedServingEndpoint; /** - * A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor. + * Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. 
*/ - encodedBaselines?: any[] | null; + embeddingManagement?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement; /** - * Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - encodedTensorName?: string | null; + etag?: string | null; /** - * Defines how the feature is encoded into the input tensor. Defaults to IDENTITY. + * Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - encoding?: string | null; + labels?: {[key: string]: string} | null; /** - * The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized. + * Identifier. Name of the FeatureOnlineStore. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{featureOnlineStore\}` */ - featureValueDomain?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataFeatureValueDomain; + name?: string | null; /** - * Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. 
If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name. + * Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. */ - groupName?: string | null; + optimized?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreOptimized; /** - * A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. + * Output only. State of the featureOnlineStore. */ - indexFeatureMapping?: string[] | null; + state?: string | null; /** - * Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + * Output only. Timestamp when this FeatureOnlineStore was last updated. */ - indicesTensorName?: string | null; + updateTime?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtable { /** - * Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature's input in the instance[]. 
The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. + * Required. Autoscaling config applied to Bigtable Instance. */ - inputBaselines?: any[] | null; + autoScaling?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtableAutoScaling; + } + export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtableAutoScaling { /** - * Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow. + * Optional. A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%. */ - inputTensorName?: string | null; + cpuUtilizationTarget?: number | null; /** - * Modality of the feature. Valid values are: numeric, image. Defaults to numeric. + * Required. The maximum number of nodes to scale up to. Must be greater than or equal to min_node_count, and less than or equal to 10 times of 'min_node_count'. */ - modality?: string | null; + maxNodeCount?: number | null; /** - * Visualization configurations for image explanation. + * Required. The minimum number of nodes to scale down to. Must be greater than or equal to 1. */ - visualization?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataVisualization; + minNodeCount?: number | null; } /** - * Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. 
For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. + * The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataFeatureValueDomain { - /** - * The maximum permissible value for this feature. - */ - maxValue?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreDedicatedServingEndpoint { /** - * The minimum permissible value for this feature. + * Optional. Private service connect config. The private service connection is available only for Optimized storage type, not for embedding management now. If PrivateServiceConnectConfig.enable_private_service_connect set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint. */ - minValue?: number | null; + privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig; /** - * If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization. + * Output only. This field will be populated with the domain name to use for this FeatureOnlineStore */ - originalMean?: number | null; + publicEndpointDomainName?: string | null; /** - * If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization. + * Output only. The name of the service attachment resource. Populated if private service connect is enabled and after FeatureViewSync is created. 
*/ - originalStddev?: number | null; + serviceAttachment?: string | null; } /** - * Visualization configurations for image explanation. + * Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataInputMetadataVisualization { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement { /** - * Excludes attributions below the specified percentile, from the highlighted areas. Defaults to 62. + * Optional. Immutable. Whether to enable embedding management in this FeatureOnlineStore. It's immutable after creation to ensure the FeatureOnlineStore availability. */ - clipPercentLowerbound?: number | null; + enabled?: boolean | null; + } + /** + * Optimized storage type + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreOptimized {} + /** + * Selector for Features of an EntityType. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureSelector { /** - * Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9. + * Required. Matches Features based on ID. */ - clipPercentUpperbound?: number | null; + idMatcher?: Schema$GoogleCloudAiplatformV1beta1IdMatcher; + } + /** + * Stats and Anomaly generated at specific timestamp for specific Feature. The start_time and end_time are used to define the time range of the dataset that current stats belongs to, e.g. prediction traffic is bucketed into prediction datasets by time window. If the Dataset is not defined by time window, start_time = end_time. Timestamp of the stats and anomalies always refers to end_time. 
Raw stats and anomalies are stored in stats_uri or anomaly_uri in the tensorflow defined protos. Field data_stats contains almost identical information with the raw stats in Vertex AI defined proto, for UI to display. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly { /** - * The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue. + * This is the threshold used when detecting anomalies. The threshold can be changed by user, so this one might be different from ThresholdConfig.value. */ - colorMap?: string | null; + anomalyDetectionThreshold?: number | null; /** - * How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE. + * Path of the anomaly file for current feature values in Cloud Storage bucket. Format: gs:////anomalies. Example: gs://monitoring_bucket/feature_name/anomalies. Stats are stored as binary format with Protobuf message Anoamlies are stored as binary format with Protobuf message [tensorflow.metadata.v0.AnomalyInfo] (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). */ - overlayType?: string | null; + anomalyUri?: string | null; /** - * Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE. + * Deviation from the current stats to baseline stats. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. */ - polarity?: string | null; + distributionDeviation?: number | null; /** - * Type of the image visualization. 
Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES. + * The end timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), end_time indicates the timestamp of the data used to generate stats (e.g. timestamp we take snapshots for feature values). */ - type?: string | null; - } - /** - * Metadata of the prediction output to be explained. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOutputMetadata { + endTime?: string | null; /** - * Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape of the outputs, so that it can be located by Attribution.output_index for a specific output. + * Feature importance score, only populated when cross-feature monitoring is enabled. For now only used to represent feature attribution score within range [0, 1] for ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW and ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT. */ - displayNameMappingKey?: string | null; + score?: number | null; /** - * Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It's not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sort the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index. + * The start timestamp of window where stats were generated. 
For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), start_time is only used to indicate the monitoring intervals, so it always equals to (end_time - monitoring_interval). */ - indexDisplayNameMapping?: any | null; + startTime?: string | null; /** - * Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow. + * Path of the stats file for current feature values in Cloud Storage bucket. Format: gs:////stats. Example: gs://monitoring_bucket/feature_name/stats. Stats are stored as binary format with Protobuf message [tensorflow.metadata.v0.FeatureNameStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto). */ - outputTensorName?: string | null; + statsUri?: string | null; } /** - * The ExplanationMetadata entries that can be overridden at online explanation time. + * Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverride { + export interface Schema$GoogleCloudAiplatformV1beta1Featurestore { /** - * Required. Overrides the input metadata of the features. The key is the name of the feature to be overridden. The keys specified here must exist in the input metadata to be overridden. If a feature is not specified here, the corresponding feature's input metadata is not overridden. + * Output only. Timestamp when this Featurestore was created. */ - inputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverrideInputMetadataOverride; - } | null; - } - /** - * The input metadata entries to be overridden. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverrideInputMetadataOverride { + createTime?: string | null; /** - * Baseline inputs for this feature. 
This overrides the `input_baseline` field of the ExplanationMetadata.InputMetadata object of the corresponding feature's input metadata. If it's not specified, the original baselines are not overridden. + * Optional. Customer-managed encryption key spec for data storage. If set, both of the online and offline data storage will be secured by this key. */ - inputBaselines?: any[] | null; - } - /** - * Parameters to configure explaining for Model's predictions. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationParameters { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Example-based explanations that returns the nearest neighbors from the provided dataset. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - examples?: Schema$GoogleCloudAiplatformV1beta1Examples; + etag?: string | null; /** - * An attribution method that computes Aumann-Shapley values taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + * Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Featurestore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - integratedGradientsAttribution?: Schema$GoogleCloudAiplatformV1beta1IntegratedGradientsAttribution; + labels?: {[key: string]: string} | null; /** - * If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. 
If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e,g, multi-class Models that predict multiple classes). + * Output only. Name of the Featurestore. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}` */ - outputIndices?: any[] | null; + name?: string | null; /** - * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for model details: https://arxiv.org/abs/1306.4265. + * Optional. Config for online storage resources. The field should not co-exist with the field of `OnlineStoreReplicationConfig`. If both of it and OnlineStoreReplicationConfig are unset, the feature store will not have an online store and cannot be used for online serving. */ - sampledShapleyAttribution?: Schema$GoogleCloudAiplatformV1beta1SampledShapleyAttribution; + onlineServingConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfig; /** - * If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predicts more than one outputs (e,g, multi-class Models). When set to -1, returns explanations for all outputs. + * Optional. TTL in days for feature values that will be stored in online serving storage. The Feature Store online storage periodically removes obsolete feature values older than `online_storage_ttl_days` since the feature generation time. Note that `online_storage_ttl_days` should be less than or equal to `offline_storage_ttl_days` for each EntityType under a featurestore. 
If not set, default to 4000 days */ - topK?: number | null; + onlineStorageTtlDays?: number | null; /** - * An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead. + * Output only. State of the featurestore. */ - xraiAttribution?: Schema$GoogleCloudAiplatformV1beta1XraiAttribution; + state?: string | null; + /** + * Output only. Timestamp when this Featurestore was last updated. + */ + updateTime?: string | null; } /** - * Specification of Model explanation. + * Configuration of how features in Featurestore are monitored. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationSpec { + export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfig { /** - * Optional. Metadata describing the Model's input and output for explanation. + * Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING). */ - metadata?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadata; + categoricalThresholdConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigThresholdConfig; /** - * Required. Parameters that configure explaining of the Model's predictions. + * The config for ImportFeatures Analysis Based Feature Monitoring. 
*/ - parameters?: Schema$GoogleCloudAiplatformV1beta1ExplanationParameters; + importFeaturesAnalysis?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigImportFeaturesAnalysis; + /** + * Threshold for numerical features of anomaly detection. This is shared by all objectives of Featurestore Monitoring for numerical features (i.e. Features with type (Feature.ValueType) DOUBLE or INT64). + */ + numericalThresholdConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigThresholdConfig; + /** + * The config for Snapshot Analysis Based Feature Monitoring. + */ + snapshotAnalysis?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigSnapshotAnalysis; } /** - * The ExplanationSpec entries that can be overridden at online explanation time. + * Configuration of the Featurestore's ImportFeature Analysis Based Monitoring. This type of analysis generates statistics for values of each Feature imported by every ImportFeatureValues operation. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExplanationSpecOverride { + export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigImportFeaturesAnalysis { /** - * The example-based explanations parameter overrides. + * The baseline used to do anomaly detection for the statistics generated by import features analysis. */ - examplesOverride?: Schema$GoogleCloudAiplatformV1beta1ExamplesOverride; - /** - * The metadata to be overridden. If not specified, no metadata is overridden. - */ - metadata?: Schema$GoogleCloudAiplatformV1beta1ExplanationMetadataOverride; + anomalyDetectionBaseline?: string | null; /** - * The parameters to be overridden. Note that the attribution method cannot be changed. If not specified, no parameter is overridden. + * Whether to enable / disable / inherite default hebavior for import features analysis. 
*/ - parameters?: Schema$GoogleCloudAiplatformV1beta1ExplanationParameters; + state?: string | null; } /** - * Describes what part of the Dataset is to be exported, the destination of the export and how to export. + * Configuration of the Featurestore's Snapshot Analysis Based Monitoring. This type of analysis generates statistics for each Feature based on a snapshot of the latest feature value of each entities every monitoring_interval. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportDataConfig { - /** - * An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations. - */ - annotationsFilter?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigSnapshotAnalysis { /** - * Split based on fractions defining the size of each set. + * The monitoring schedule for snapshot analysis. For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoring_interval for Features under it. Feature-level config: disabled = true indicates disabled regardless of the EntityType-level config; unset monitoring_interval indicates going with EntityType-level config; otherwise run snapshot analysis monitoring with monitoring_interval regardless of the EntityType-level config. Explicitly Disable the snapshot analysis based monitoring. */ - fractionSplit?: Schema$GoogleCloudAiplatformV1beta1ExportFractionSplit; + disabled?: boolean | null; /** - * The Google Cloud Storage location where the output is to be written to. In the given directory a new directory will be created with name: `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. 
Inside that directory, annotations with the same schema will be grouped into sub directories which are named with the corresponding annotations' schema title. Inside these sub directories, a schema.yaml will be created to describe the output format. + * Configuration of the snapshot analysis based monitoring pipeline running interval. The value is rolled up to full day. If both monitoring_interval_days and the deprecated `monitoring_interval` field are set when creating/updating EntityTypes/Features, monitoring_interval_days will be used. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; - } - /** - * Runtime operation information for DatasetService.ExportData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportDataOperationMetadata { + monitoringInterval?: string | null; /** - * A Google Cloud Storage directory which path ends with '/'. The exported data is stored in the directory. + * Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. */ - gcsOutputDirectory?: string | null; + monitoringIntervalDays?: number | null; /** - * The common part of the operation metadata. + * Customized export features time window for snapshot analysis. Unit is one day. Default value is 3 weeks. Minimum value is 1 day. Maximum value is 4000 days. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + stalenessDays?: number | null; } /** - * Request message for DatasetService.ExportData. + * The config for Featurestore Monitoring threshold. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportDataRequest { + export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigThresholdConfig { /** - * Required. The desired output location. + * Specify a threshold value that can trigger the alert. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. 
For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. */ - exportConfig?: Schema$GoogleCloudAiplatformV1beta1ExportDataConfig; + value?: number | null; } /** - * Response message for DatasetService.ExportData. + * OnlineServingConfig specifies the details for provisioning online serving resources. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportDataResponse { + export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfig { /** - * All of the files that are exported in this export operation. For custom code training export, only three (training, validation and test) Cloud Storage paths in wildcard format are populated (for example, gs://.../training-*). + * The number of nodes for the online store. The number of nodes doesn't scale automatically, but you can manually update the number of nodes. If set to 0, the featurestore will not have an online store and cannot be used for online serving. */ - exportedFiles?: string[] | null; - } - /** - * Details of operations that exports Features values. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesOperationMetadata { + fixedNodeCount?: number | null; /** - * Operation metadata for Featurestore export Feature values. + * Online serving scaling configuration. Only one of `fixed_node_count` and `scaling` can be set. Setting one will reset the other. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + scaling?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfigScaling; } /** - * Request message for FeaturestoreService.ExportFeatureValues. + * Online serving scaling configuration. If min_node_count and max_node_count are set to the same value, the cluster will be configured with the fixed number of node (no auto-scaling). 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequest { - /** - * Required. Specifies destination location and format. - */ - destination?: Schema$GoogleCloudAiplatformV1beta1FeatureValueDestination; - /** - * Required. Selects Features to export values of. - */ - featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; + export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfigScaling { /** - * Exports all historical values of all entities of the EntityType within a time range + * Optional. The cpu utilization that the Autoscaler should be trying to achieve. This number is on a scale from 0 (no utilization) to 100 (total utilization), and is limited between 10 and 80. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set or set to 0, default to 50. */ - fullExport?: Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestFullExport; + cpuUtilizationTarget?: number | null; /** - * Per-Feature export settings. + * The maximum number of nodes to scale up to. Must be greater than min_node_count, and less than or equal to 10 times of 'min_node_count'. */ - settings?: Schema$GoogleCloudAiplatformV1beta1DestinationFeatureSetting[]; + maxNodeCount?: number | null; /** - * Exports the latest Feature values of all entities of the EntityType within a time range. + * Required. The minimum number of nodes to scale down to. Must be greater than or equal to 1. */ - snapshotExport?: Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestSnapshotExport; + minNodeCount?: number | null; } /** - * Describes exporting all historical Feature values of all entities of the EntityType between [start_time, end_time]. + * Value for a feature. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestFullExport { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureValue { /** - * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. + * A list of bool type feature value. */ - endTime?: string | null; + boolArrayValue?: Schema$GoogleCloudAiplatformV1beta1BoolArray; /** - * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. + * Bool type feature value. */ - startTime?: string | null; - } - /** - * Describes exporting the latest Feature values of all entities of the EntityType between [start_time, snapshot_time]. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesRequestSnapshotExport { + boolValue?: boolean | null; /** - * Exports Feature values as of this timestamp. If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. + * Bytes feature value. */ - snapshotTime?: string | null; + bytesValue?: string | null; /** - * Excludes Feature values with feature generation timestamp before this timestamp. If not set, retrieve oldest values kept in Feature Store. Timestamp, if present, must not have higher than millisecond precision. + * A list of double type feature value. */ - startTime?: string | null; - } - /** - * Response message for FeaturestoreService.ExportFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportFeatureValuesResponse {} - /** - * Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. 
If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportFractionSplit { + doubleArrayValue?: Schema$GoogleCloudAiplatformV1beta1DoubleArray; /** - * The fraction of the input data that is to be used to evaluate the Model. + * Double type feature value. */ - testFraction?: number | null; + doubleValue?: number | null; /** - * The fraction of the input data that is to be used to train the Model. + * A list of int64 type feature value. */ - trainingFraction?: number | null; + int64ArrayValue?: Schema$GoogleCloudAiplatformV1beta1Int64Array; /** - * The fraction of the input data that is to be used to validate the Model. + * Int64 feature value. */ - validationFraction?: number | null; - } - /** - * Details of ModelService.ExportModel operation. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportModelOperationMetadata { + int64Value?: string | null; /** - * The common part of the operation metadata. + * Metadata of feature value. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + metadata?: Schema$GoogleCloudAiplatformV1beta1FeatureValueMetadata; /** - * Output only. Information further describing the output of this Model export. + * A list of string type feature value. */ - outputInfo?: Schema$GoogleCloudAiplatformV1beta1ExportModelOperationMetadataOutputInfo; + stringArrayValue?: Schema$GoogleCloudAiplatformV1beta1StringArray; + /** + * String feature value. + */ + stringValue?: string | null; } /** - * Further describes the output of the ExportModel. Supplements ExportModelRequest.OutputConfig. + * A destination location for Feature values and format. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ExportModelOperationMetadataOutputInfo { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureValueDestination { /** - * Output only. If the Model artifact is being exported to Google Cloud Storage this is the full path of the directory created, into which the Model files are being written to. + * Output in BigQuery format. BigQueryDestination.output_uri in FeatureValueDestination.bigquery_destination must refer to a table. */ - artifactOutputUri?: string | null; + bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; /** - * Output only. If the Model image is being exported to Google Container Registry or Artifact Registry this is the full path of the image created. + * Output in CSV format. Array Feature value types are not allowed in CSV format. */ - imageOutputUri?: string | null; + csvDestination?: Schema$GoogleCloudAiplatformV1beta1CsvDestination; + /** + * Output in TFRecord format. Below are the mapping from Feature value type in Featurestore to Feature value type in TFRecord: Value type in Featurestore | Value type in TFRecord DOUBLE, DOUBLE_ARRAY | FLOAT_LIST INT64, INT64_ARRAY | INT64_LIST STRING, STRING_ARRAY, BYTES | BYTES_LIST true -\> byte_string("true"), false -\> byte_string("false") BOOL, BOOL_ARRAY (true, false) | BYTES_LIST + */ + tfrecordDestination?: Schema$GoogleCloudAiplatformV1beta1TFRecordDestination; } /** - * Request message for ModelService.ExportModel. + * Container for list of values. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportModelRequest { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureValueList { /** - * Required. The desired output location and configuration. + * A list of feature values. All of them should be the same data type. 
*/ - outputConfig?: Schema$GoogleCloudAiplatformV1beta1ExportModelRequestOutputConfig; + values?: Schema$GoogleCloudAiplatformV1beta1FeatureValue[]; } /** - * Output configuration for the Model export. + * Metadata of feature value. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportModelRequestOutputConfig { - /** - * The Cloud Storage location where the Model artifact is to be written to. Under the directory given as the destination a new one with name "`model-export--`", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside, the Model and any of its supporting files will be written. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `ARTIFACT`. - */ - artifactDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; - /** - * The ID of the format in which the Model must be exported. Each Model lists the export formats it supports. If no value is provided here, then the first from the list of the Model's supported formats is used by default. - */ - exportFormatId?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1FeatureValueMetadata { /** - * The Google Container Registry or Artifact Registry uri where the Model container image will be copied to. This field should only be set when the `exportableContent` field of the [Model.supported_export_formats] object contains `IMAGE`. + * Feature generation timestamp. Typically, it is provided by user at feature ingestion time. If not, feature store will use the system timestamp when the data is ingested into feature store. For streaming ingestion, the time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future. */ - imageDestination?: Schema$GoogleCloudAiplatformV1beta1ContainerRegistryDestination; + generateTime?: string | null; } /** - * Response message of ModelService.ExportModel operation. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportModelResponse {} - /** - * Request message for TensorboardService.ExportTensorboardTimeSeriesData. + * FeatureView is representation of values that the FeatureOnlineStore will serve based on its syncConfig. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportTensorboardTimeSeriesDataRequest { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureView { /** - * Exports the TensorboardTimeSeries' data that match the filter expression. + * Optional. Configures how data is supposed to be extracted from a BigQuery source to be loaded onto the FeatureOnlineStore. */ - filter?: string | null; + bigQuerySource?: Schema$GoogleCloudAiplatformV1beta1FeatureViewBigQuerySource; /** - * Field to use to sort the TensorboardTimeSeries' data. By default, TensorboardTimeSeries' data is returned in a pseudo random order. + * Output only. Timestamp when this FeatureView was created. */ - orderBy?: string | null; + createTime?: string | null; /** - * The maximum number of data points to return per page. The default page_size is 1000. Values must be between 1 and 10000. Values above 10000 are coerced to 10000. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - pageSize?: number | null; + etag?: string | null; /** - * A page token, received from a previous ExportTensorboardTimeSeriesData call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ExportTensorboardTimeSeriesData must match the call that provided the page token. + * Optional. Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore. */ - pageToken?: string | null; - } - /** - * Response message for TensorboardService.ExportTensorboardTimeSeriesData. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1ExportTensorboardTimeSeriesDataResponse { + featureRegistrySource?: Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySource; /** - * A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. */ - nextPageToken?: string | null; + indexConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfig; /** - * The returned time series data points. + * Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - timeSeriesDataPoints?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesDataPoint[]; - } - /** - * Extensions are tools for large language models to access external data, run computations, etc. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Extension { + labels?: {[key: string]: string} | null; /** - * Output only. Timestamp when this Extension was created. + * Identifier. Name of the FeatureView. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}` */ - createTime?: string | null; + name?: string | null; /** - * Optional. The description of the Extension. + * Output only. 
A Service Account unique to this FeatureView. The role bigquery.dataViewer should be granted to this service account to allow Vertex AI Feature Store to sync data to the online store. */ - description?: string | null; + serviceAccountEmail?: string | null; /** - * Required. The display name of the Extension. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Optional. Service agent type used during data sync. By default, the Vertex AI Service Agent is used. When using an IAM Policy to isolate this FeatureView within a project, a separate service account should be provisioned by setting this field to `SERVICE_AGENT_TYPE_FEATURE_VIEW`. This will generate a separate service account to access the BigQuery source table. */ - displayName?: string | null; + serviceAgentType?: string | null; /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving. */ - etag?: string | null; + syncConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncConfig; /** - * Output only. Supported operations. + * Output only. Timestamp when this FeatureView was last updated. */ - extensionOperations?: Schema$GoogleCloudAiplatformV1beta1ExtensionOperation[]; + updateTime?: string | null; /** - * Required. Manifest of the Extension. + * Optional. Deprecated: please use FeatureView.index_config instead. */ - manifest?: Schema$GoogleCloudAiplatformV1beta1ExtensionManifest; - /** - * Identifier. The resource name of the Extension. - */ - name?: string | null; - /** - * Optional. The PrivateServiceConnect config for the extension. 
If specified, the service endpoints associated with the Extension should be registered with private network access in the provided Service Directory (https://cloud.google.com/service-directory/docs/configuring-private-network-access). If the service contains more than one endpoint with a network, the service will arbitrarilty choose one of the endpoints to use for extension execution. - */ - privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1ExtensionPrivateServiceConnectConfig; - /** - * Optional. Runtime config controlling the runtime behavior of this Extension. - */ - runtimeConfig?: Schema$GoogleCloudAiplatformV1beta1RuntimeConfig; + vectorSearchConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfig; + } + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewBigQuerySource { /** - * Optional. Examples to illustrate the usage of the extension as a tool. + * Required. Columns to construct entity_id / row keys. */ - toolUseExamples?: Schema$GoogleCloudAiplatformV1beta1ToolUseExample[]; + entityIdColumns?: string[] | null; /** - * Output only. Timestamp when this Extension was most recently updated. + * Required. The BigQuery view URI that will be materialized on each sync trigger based on FeatureView.SyncConfig. */ - updateTime?: string | null; + uri?: string | null; } /** - * Manifest spec of an Extension needed for runtime execution. + * Lookup key for a feature view. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExtensionManifest { - /** - * Required. Immutable. The API specification shown to the LLM. - */ - apiSpec?: Schema$GoogleCloudAiplatformV1beta1ExtensionManifestApiSpec; + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey { /** - * Required. Immutable. Type of auth supported by this extension. + * The actual Entity ID will be composed from this struct. This should match with the way ID is defined in the FeatureView spec. 
*/ - authConfig?: Schema$GoogleCloudAiplatformV1beta1AuthConfig; + compositeKey?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKeyCompositeKey; /** - * Required. The natural language description shown to the LLM. It should describe the usage of the extension, and is essential for the LLM to perform reasoning. + * String key to use for lookup. */ - description?: string | null; + key?: string | null; + } + /** + * ID that is comprised from several parts (columns). + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKeyCompositeKey { /** - * Required. Extension name shown to the LLM. The name can be up to 128 characters long. + * Parts to construct Entity ID. Should match with the same ID columns as defined in FeatureView in the same order. */ - name?: string | null; + parts?: string[] | null; } /** - * The API specification shown to the LLM. + * A Feature Registry source for features that need to be synced to Online Store. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExtensionManifestApiSpec { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySource { /** - * Cloud Storage URI pointing to the OpenAPI spec. + * Required. List of features that need to be synced to Online Store. */ - openApiGcsUri?: string | null; + featureGroups?: Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySourceFeatureGroup[]; /** - * The API spec in Open API standard and YAML format. + * Optional. The project number of the parent project of the Feature Groups. */ - openApiYaml?: string | null; + projectNumber?: string | null; } /** - * Operation of an extension. + * Features belonging to a single feature group that will be synced to Online Store. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExtensionOperation { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySourceFeatureGroup { /** - * Output only. 
Structured representation of a function declaration as defined by the OpenAPI Spec. + * Required. Identifier of the feature group. */ - functionDeclaration?: Schema$GoogleCloudAiplatformV1beta1FunctionDeclaration; + featureGroupId?: string | null; /** - * Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. + * Required. Identifiers of features under the feature group. */ - operationId?: string | null; + featureIds?: string[] | null; } /** - * PrivateExtensionConfig configuration for the extension. + * Configuration for vector indexing. */ - export interface Schema$GoogleCloudAiplatformV1beta1ExtensionPrivateServiceConnectConfig { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfig { /** - * Required. The Service Directory resource name in which the service endpoints associated to the extension are registered. Format: `projects/{project_id\}/locations/{location_id\}/namespaces/{namespace_id\}/services/{service_id\}` - The Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) should be granted `servicedirectory.viewer` and `servicedirectory.pscAuthorizedService` roles on the resource. + * Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. */ - serviceDirectory?: string | null; - } - /** - * Feature Metadata information. For example, color is a feature that describes an apple. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Feature { + bruteForceConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig; /** - * Output only. 
Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was created. + * Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. */ - createTime?: string | null; + crowdingColumn?: string | null; /** - * Description of the Feature. + * Optional. The distance measure used in nearest neighbor search. */ - description?: string | null; + distanceMeasureType?: string | null; /** - * Optional. Only applicable for Vertex AI Feature Store (Legacy). If not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If set to true, all types of data monitoring are disabled despite the config on EntityType. + * Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. */ - disableMonitoring?: boolean | null; + embeddingColumn?: string | null; /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Optional. The number of dimensions of the input embedding. */ - etag?: string | null; + embeddingDimension?: number | null; /** - * Optional. The labels with user-defined metadata to organize your Features. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. 
No more than 64 user labels can be associated with one Feature (System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Optional. Columns of features that're used to filter vector search results. */ - labels?: {[key: string]: string} | null; + filterColumns?: string[] | null; /** - * Optional. Only applicable for Vertex AI Feature Store (Legacy). Deprecated: The custom monitoring configuration for this Feature, if not set, use the monitoring_config defined for the EntityType this Feature belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 can enable monitoring. If this is populated with FeaturestoreMonitoringConfig.disabled = true, snapshot analysis monitoring is disabled; if FeaturestoreMonitoringConfig.monitoring_interval specified, snapshot analysis monitoring is enabled. Otherwise, snapshot analysis monitoring config is same as the EntityType's this Feature belongs to. + * Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 */ - monitoringConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfig; + treeAhConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig; + } + /** + * Configuration options for using brute force search. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig {} + /** + * Configuration options for the tree-AH algorithm. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig { /** - * Output only. Only applicable for Vertex AI Feature Store (Legacy). A list of historical SnapshotAnalysis stats requested by user, sorted by FeatureStatsAnomaly.start_time descending. + * Optional. Number of embeddings on each leaf node. The default value is 1000 if not set. 
*/ - monitoringStats?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly[]; + leafNodeEmbeddingCount?: string | null; + } + /** + * FeatureViewSync is a representation of sync operation which copies data from data source to Feature View in Online Store. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewSync { /** - * Output only. Only applicable for Vertex AI Feature Store (Legacy). The list of historical stats and anomalies with specified objectives. + * Output only. Time when this FeatureViewSync is created. Creation of a FeatureViewSync means that the job is pending / waiting for sufficient resources but may not have started the actual data transfer yet. */ - monitoringStatsAnomalies?: Schema$GoogleCloudAiplatformV1beta1FeatureMonitoringStatsAnomaly[]; + createTime?: string | null; /** - * Immutable. Name of the Feature. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entity_type\}/features/{feature\}` `projects/{project\}/locations/{location\}/featureGroups/{feature_group\}/features/{feature\}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. + * Output only. Final status of the FeatureViewSync. + */ + finalStatus?: Schema$GoogleRpcStatus; + /** + * Identifier. Name of the FeatureViewSync. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}/featureViewSyncs/{feature_view_sync\}` */ name?: string | null; /** - * Entity responsible for maintaining this feature. Can be comma separated list of email addresses or URIs. + * Output only. Time when this FeatureViewSync is finished. */ - pointOfContact?: string | null; + runTime?: Schema$GoogleTypeInterval; /** - * Output only. 
Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was most recently updated. + * Output only. Summary of the sync job. */ - updateTime?: string | null; + syncSummary?: Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncSyncSummary; + } + /** + * Configuration for Sync. Only one option is set. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncConfig { /** - * Immutable. Only applicable for Vertex AI Feature Store (Legacy). Type of Feature value. + * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE\}" or "TZ=${IANA_TIME_ZONE\}". The ${IANA_TIME_ZONE\} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". */ - valueType?: string | null; + cron?: string | null; + } + /** + * Summary from the Sync job. For continuous syncs, the summary is updated periodically. For batch syncs, it gets updated on completion of the sync. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncSyncSummary { /** - * Only applicable for Vertex AI Feature Store. The name of the BigQuery Table/View column hosting data for this version. If no value is provided, will use feature_id. + * Output only. Total number of rows synced. */ - versionColumnName?: string | null; + rowSynced?: string | null; + /** + * Output only. BigQuery slot milliseconds consumed for the sync job. + */ + totalSlot?: string | null; } /** - * Vertex AI Feature Group. + * Deprecated. Use IndexConfig instead. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureGroup { + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfig { /** - * Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source. 
The BigQuery source table or view must have at least one entity ID column and a column named `feature_timestamp`. + * Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. */ - bigQuery?: Schema$GoogleCloudAiplatformV1beta1FeatureGroupBigQuery; + bruteForceConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigBruteForceConfig; /** - * Output only. Timestamp when this FeatureGroup was created. + * Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. */ - createTime?: string | null; + crowdingColumn?: string | null; /** - * Optional. Description of the FeatureGroup. + * Optional. The distance measure used in nearest neighbor search. */ - description?: string | null; + distanceMeasureType?: string | null; /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. */ - etag?: string | null; + embeddingColumn?: string | null; /** - * Optional. The labels with user-defined metadata to organize your FeatureGroup. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. 
No more than 64 user labels can be associated with one FeatureGroup(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Optional. The number of dimensions of the input embedding. */ - labels?: {[key: string]: string} | null; + embeddingDimension?: number | null; /** - * Identifier. Name of the FeatureGroup. Format: `projects/{project\}/locations/{location\}/featureGroups/{featureGroup\}` + * Optional. Columns of features that're used to filter vector search results. */ - name?: string | null; + filterColumns?: string[] | null; /** - * Output only. Timestamp when this FeatureGroup was last updated. + * Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 */ - updateTime?: string | null; + treeAhConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigTreeAHConfig; + } + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigBruteForceConfig {} + export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigTreeAHConfig { + /** + * Optional. Number of embeddings on each leaf node. The default value is 1000 if not set. + */ + leafNodeEmbeddingCount?: string | null; } /** - * Input source type for BigQuery Tables and Views. + * Request message for FeatureOnlineStoreService.FetchFeatureValues. All the features under the requested feature view will be returned. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureGroupBigQuery { + export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesRequest { /** - * Required. Immutable. The BigQuery source URI that points to either a BigQuery Table or View. + * Optional. Response data format. If not set, FeatureViewDataFormat.KEY_VALUE will be used. 
*/ - bigQuerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; + dataFormat?: string | null; /** - * Optional. Columns to construct entity_id / row keys. If not provided defaults to `entity_id`. + * Optional. The request key to fetch feature values for. */ - entityIdColumns?: string[] | null; + dataKey?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey; + /** + * Specify response data format. If not set, KeyValue format will be used. Deprecated. Use FetchFeatureValuesRequest.data_format. + */ + format?: string | null; + /** + * Simple ID. The whole string will be used as is to identify Entity to fetch feature values for. + */ + id?: string | null; } /** - * A list of historical SnapshotAnalysis or ImportFeaturesAnalysis stats requested by user, sorted by FeatureStatsAnomaly.start_time descending. + * Response message for FeatureOnlineStoreService.FetchFeatureValues */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureMonitoringStatsAnomaly { + export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponse { /** - * Output only. The stats and anomalies generated at specific timestamp. + * The data key associated with this response. Will only be populated for FeatureOnlineStoreService.StreamingFetchFeatureValues RPCs. */ - featureStatsAnomaly?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly; + dataKey?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey; /** - * Output only. The objective for each stats. + * Feature values in KeyValue format. */ - objective?: string | null; + keyValues?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairList; + /** + * Feature values in proto Struct format. + */ + protoStruct?: {[key: string]: any} | null; } /** - * Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. 
+ * Response structure in the format of key (feature name) and (feature) value pair. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigma { + export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairList { /** - * Noise sigma per feature. No noise is added to features that are not set. + * List of feature names and values. */ - noiseSigma?: Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigmaNoiseSigmaForFeature[]; + features?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair[]; } /** - * Noise sigma for a single feature. + * Feature name & value pair. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigmaNoiseSigmaForFeature { + export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { /** - * The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs. + * Feature short name. */ name?: string | null; /** - * This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1. + * Feature value. */ - sigma?: number | null; + value?: Schema$GoogleCloudAiplatformV1beta1FeatureValue; } /** - * Vertex AI Feature Online Store provides a centralized repository for serving ML features and embedding indexes at low latency. The Feature Online Store is a top-level container. + * URI based data. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStore { - /** - * Contains settings for the Cloud Bigtable instance that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. - */ - bigtable?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtable; - /** - * Output only. 
Timestamp when this FeatureOnlineStore was created. - */ - createTime?: string | null; - /** - * Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. - */ - dedicatedServingEndpoint?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreDedicatedServingEndpoint; + export interface Schema$GoogleCloudAiplatformV1beta1FileData { /** - * Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. + * Required. URI. */ - embeddingManagement?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement; + fileUri?: string | null; /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Required. The IANA standard MIME type of the source data. */ - etag?: string | null; + mimeType?: string | null; + } + /** + * Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FilterSplit { /** - * Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Required. A filter on DataItems of the Dataset. 
DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. */ - labels?: {[key: string]: string} | null; + testFilter?: string | null; /** - * Identifier. Name of the FeatureOnlineStore. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{featureOnlineStore\}` + * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. */ - name?: string | null; + trainingFilter?: string | null; /** - * Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. + * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. */ - optimized?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreOptimized; + validationFilter?: string | null; + } + /** + * The request message for MatchService.FindNeighbors. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequest { /** - * Output only. 
State of the featureOnlineStore. + * The ID of the DeployedIndex that will serve the request. This request is sent to a specific IndexEndpoint, as per the IndexEndpoint.network. That IndexEndpoint also has IndexEndpoint.deployed_indexes, and each such index has a DeployedIndex.id field. The value of the field below must equal one of the DeployedIndex.id fields of the IndexEndpoint that is being called for this request. */ - state?: string | null; + deployedIndexId?: string | null; /** - * Output only. Timestamp when this FeatureOnlineStore was last updated. + * The list of queries. */ - updateTime?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtable { + queries?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequestQuery[]; /** - * Required. Autoscaling config applied to Bigtable Instance. + * If set to true, the full datapoints (including all vector values and restricts) of the nearest neighbors are returned. Note that returning full datapoint will significantly increase the latency and cost of the query. */ - autoScaling?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtableAutoScaling; + returnFullDatapoint?: boolean | null; } - export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreBigtableAutoScaling { + /** + * A query to find a number of the nearest neighbors (most similar vectors) of a vector. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequestQuery { /** - * Optional. A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%. + * The number of neighbors to find via approximate search before exact reordering is performed. If not set, the default value from scam config is used; if set, this value must be \> 0. 
*/ - cpuUtilizationTarget?: number | null; + approximateNeighborCount?: number | null; /** - * Required. The maximum number of nodes to scale up to. Must be greater than or equal to min_node_count, and less than or equal to 10 times of 'min_node_count'. + * Required. The datapoint/vector whose nearest neighbors should be searched for. */ - maxNodeCount?: number | null; + datapoint?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint; /** - * Required. The minimum number of nodes to scale down to. Must be greater than or equal to 1. + * The fraction of the number of leaves to search, set at query time allows user to tune search performance. This value increase result in both search accuracy and latency increase. The value should be between 0.0 and 1.0. If not set or set to 0.0, query uses the default value specified in NearestNeighborSearchConfig.TreeAHConfig.fraction_leaf_nodes_to_search. */ - minNodeCount?: number | null; - } - /** - * The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreDedicatedServingEndpoint { + fractionLeafNodesToSearchOverride?: number | null; /** - * Optional. Private service connect config. The private service connection is available only for Optimized storage type, not for embedding management now. If PrivateServiceConnectConfig.enable_private_service_connect set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint. + * The number of nearest neighbors to be retrieved from database for each query. If not set, will use the default from the service configuration (https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes#nearest-neighbor-search-config). 
*/ - privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig; + neighborCount?: number | null; /** - * Output only. This field will be populated with the domain name to use for this FeatureOnlineStore + * Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. */ - publicEndpointDomainName?: string | null; + perCrowdingAttributeNeighborCount?: number | null; /** - * Output only. The name of the service attachment resource. Populated if private service connect is enabled and after FeatureViewSync is created. + * Optional. Represents RRF algorithm that combines search results. */ - serviceAttachment?: string | null; + rrf?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF; } /** - * Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management. + * Parameters for RRF algorithm that combines search results. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement { + export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF { /** - * Optional. Immutable. Whether to enable embedding management in this FeatureOnlineStore. It's immutable after creation to ensure the FeatureOnlineStore availability. + * Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. 
*/ - enabled?: boolean | null; + alpha?: number | null; } /** - * Optimized storage type - */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStoreOptimized {} - /** - * Selector for Features of an EntityType. + * The response message for MatchService.FindNeighbors. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureSelector { + export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponse { /** - * Required. Matches Features based on ID. + * The nearest neighbors of the query datapoints. */ - idMatcher?: Schema$GoogleCloudAiplatformV1beta1IdMatcher; + nearestNeighbors?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNearestNeighbors[]; } /** - * Stats and Anomaly generated at specific timestamp for specific Feature. The start_time and end_time are used to define the time range of the dataset that current stats belongs to, e.g. prediction traffic is bucketed into prediction datasets by time window. If the Dataset is not defined by time window, start_time = end_time. Timestamp of the stats and anomalies always refers to end_time. Raw stats and anomalies are stored in stats_uri or anomaly_uri in the tensorflow defined protos. Field data_stats contains almost identical information with the raw stats in Vertex AI defined proto, for UI to display. + * Nearest neighbors for one query. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly { + export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNearestNeighbors { /** - * This is the threshold used when detecting anomalies. The threshold can be changed by user, so this one might be different from ThresholdConfig.value. + * The ID of the query datapoint. */ - anomalyDetectionThreshold?: number | null; + id?: string | null; /** - * Path of the anomaly file for current feature values in Cloud Storage bucket. Format: gs:////anomalies. Example: gs://monitoring_bucket/feature_name/anomalies. 
Stats are stored as binary format with Protobuf message Anoamlies are stored as binary format with Protobuf message [tensorflow.metadata.v0.AnomalyInfo] (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). + * All its neighbors. */ - anomalyUri?: string | null; + neighbors?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor[]; + } + /** + * A neighbor of the query vector. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor { /** - * Deviation from the current stats to baseline stats. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. + * The datapoint of the neighbor. Note that full datapoints are returned only when "return_full_datapoint" is set to true. Otherwise, only the "datapoint_id" and "crowding_tag" fields are populated. */ - distributionDeviation?: number | null; + datapoint?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint; /** - * The end timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), end_time indicates the timestamp of the data used to generate stats (e.g. timestamp we take snapshots for feature values). + * The distance between the neighbor and the dense embedding query. */ - endTime?: string | null; + distance?: number | null; /** - * Feature importance score, only populated when cross-feature monitoring is enabled. For now only used to represent feature attribution score within range [0, 1] for ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW and ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT. + * The distance between the neighbor and the query sparse_embedding. */ - score?: number | null; + sparseDistance?: number | null; + } + /** + * Input for fluency metric. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1FluencyInput { /** - * The start timestamp of window where stats were generated. For objectives where time window doesn't make sense (e.g. Featurestore Snapshot Monitoring), start_time is only used to indicate the monitoring intervals, so it always equals to (end_time - monitoring_interval). + * Required. Fluency instance. */ - startTime?: string | null; + instance?: Schema$GoogleCloudAiplatformV1beta1FluencyInstance; /** - * Path of the stats file for current feature values in Cloud Storage bucket. Format: gs:////stats. Example: gs://monitoring_bucket/feature_name/stats. Stats are stored as binary format with Protobuf message [tensorflow.metadata.v0.FeatureNameStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto). + * Required. Spec for fluency score metric. */ - statsUri?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1FluencySpec; } /** - * Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. + * Spec for fluency instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1Featurestore { - /** - * Output only. Timestamp when this Featurestore was created. - */ - createTime?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1FluencyInstance { /** - * Optional. Customer-managed encryption key spec for data storage. If set, both of the online and offline data storage will be secured by this key. + * Required. Output of the evaluated model. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + prediction?: string | null; + } + /** + * Spec for fluency result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FluencyResult { /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
+ * Output only. Confidence for fluency score. */ - etag?: string | null; + confidence?: number | null; /** - * Optional. The labels with user-defined metadata to organize your Featurestore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one Featurestore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Output only. Explanation for fluency score. */ - labels?: {[key: string]: string} | null; + explanation?: string | null; /** - * Output only. Name of the Featurestore. Format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}` + * Output only. Fluency score. */ - name?: string | null; + score?: number | null; + } + /** + * Spec for fluency score metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FluencySpec { /** - * Optional. Config for online storage resources. The field should not co-exist with the field of `OnlineStoreReplicationConfig`. If both of it and OnlineStoreReplicationConfig are unset, the feature store will not have an online store and cannot be used for online serving. + * Optional. Which version to use for evaluation. */ - onlineServingConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfig; + version?: number | null; + } + /** + * Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. 
If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. + */ + export interface Schema$GoogleCloudAiplatformV1beta1FractionSplit { /** - * Optional. TTL in days for feature values that will be stored in online serving storage. The Feature Store online storage periodically removes obsolete feature values older than `online_storage_ttl_days` since the feature generation time. Note that `online_storage_ttl_days` should be less than or equal to `offline_storage_ttl_days` for each EntityType under a featurestore. If not set, default to 4000 days + * The fraction of the input data that is to be used to evaluate the Model. */ - onlineStorageTtlDays?: number | null; + testFraction?: number | null; /** - * Output only. State of the featurestore. + * The fraction of the input data that is to be used to train the Model. */ - state?: string | null; + trainingFraction?: number | null; /** - * Output only. Timestamp when this Featurestore was last updated. + * The fraction of the input data that is to be used to validate the Model. */ - updateTime?: string | null; + validationFraction?: number | null; } /** - * Configuration of how features in Featurestore are monitored. + * Input for fulfillment metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfig { - /** - * Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING). - */ - categoricalThresholdConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigThresholdConfig; - /** - * The config for ImportFeatures Analysis Based Feature Monitoring. 
- */ - importFeaturesAnalysis?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigImportFeaturesAnalysis; + export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentInput { /** - * Threshold for numerical features of anomaly detection. This is shared by all objectives of Featurestore Monitoring for numerical features (i.e. Features with type (Feature.ValueType) DOUBLE or INT64). + * Required. Fulfillment instance. */ - numericalThresholdConfig?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigThresholdConfig; + instance?: Schema$GoogleCloudAiplatformV1beta1FulfillmentInstance; /** - * The config for Snapshot Analysis Based Feature Monitoring. + * Required. Spec for fulfillment score metric. */ - snapshotAnalysis?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigSnapshotAnalysis; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1FulfillmentSpec; } /** - * Configuration of the Featurestore's ImportFeature Analysis Based Monitoring. This type of analysis generates statistics for values of each Feature imported by every ImportFeatureValues operation. + * Spec for fulfillment instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigImportFeaturesAnalysis { + export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentInstance { /** - * The baseline used to do anomaly detection for the statistics generated by import features analysis. + * Required. Inference instruction prompt to compare prediction with. */ - anomalyDetectionBaseline?: string | null; + instruction?: string | null; /** - * Whether to enable / disable / inherite default hebavior for import features analysis. + * Required. Output of the evaluated model. */ - state?: string | null; + prediction?: string | null; } /** - * Configuration of the Featurestore's Snapshot Analysis Based Monitoring. 
This type of analysis generates statistics for each Feature based on a snapshot of the latest feature value of each entities every monitoring_interval. + * Spec for fulfillment result. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigSnapshotAnalysis { + export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentResult { /** - * The monitoring schedule for snapshot analysis. For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoring_interval for Features under it. Feature-level config: disabled = true indicates disabled regardless of the EntityType-level config; unset monitoring_interval indicates going with EntityType-level config; otherwise run snapshot analysis monitoring with monitoring_interval regardless of the EntityType-level config. Explicitly Disable the snapshot analysis based monitoring. + * Output only. Confidence for fulfillment score. */ - disabled?: boolean | null; + confidence?: number | null; /** - * Configuration of the snapshot analysis based monitoring pipeline running interval. The value is rolled up to full day. If both monitoring_interval_days and the deprecated `monitoring_interval` field are set when creating/updating EntityTypes/Features, monitoring_interval_days will be used. + * Output only. Explanation for fulfillment score. */ - monitoringInterval?: string | null; + explanation?: string | null; /** - * Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. + * Output only. Fulfillment score. */ - monitoringIntervalDays?: number | null; - /** - * Customized export features time window for snapshot analysis. Unit is one day. Default value is 3 weeks. Minimum value is 1 day. Maximum value is 4000 days. 
- */ - stalenessDays?: number | null; + score?: number | null; } /** - * The config for Featurestore Monitoring threshold. + * Spec for fulfillment metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreMonitoringConfigThresholdConfig { + export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentSpec { /** - * Specify a threshold value that can trigger the alert. 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + * Optional. Which version to use for evaluation. */ - value?: number | null; + version?: number | null; } /** - * OnlineServingConfig specifies the details for provisioning online serving resources. + * A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfig { + export interface Schema$GoogleCloudAiplatformV1beta1FunctionCall { /** - * The number of nodes for the online store. The number of nodes doesn't scale automatically, but you can manually update the number of nodes. If set to 0, the featurestore will not have an online store and cannot be used for online serving. + * Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. */ - fixedNodeCount?: number | null; + args?: {[key: string]: any} | null; /** - * Online serving scaling configuration. Only one of `fixed_node_count` and `scaling` can be set. Setting one will reset the other. + * Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
*/ - scaling?: Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfigScaling; + name?: string | null; } /** - * Online serving scaling configuration. If min_node_count and max_node_count are set to the same value, the cluster will be configured with the fixed number of node (no auto-scaling). + * Function calling config. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeaturestoreOnlineServingConfigScaling { - /** - * Optional. The cpu utilization that the Autoscaler should be trying to achieve. This number is on a scale from 0 (no utilization) to 100 (total utilization), and is limited between 10 and 80. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set or set to 0, default to 50. - */ - cpuUtilizationTarget?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1FunctionCallingConfig { /** - * The maximum number of nodes to scale up to. Must be greater than min_node_count, and less than or equal to 10 times of 'min_node_count'. + * Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. */ - maxNodeCount?: number | null; + allowedFunctionNames?: string[] | null; /** - * Required. The minimum number of nodes to scale down to. Must be greater than or equal to 1. + * Optional. Function calling mode. */ - minNodeCount?: number | null; + mode?: string | null; } /** - * Value for a feature. + * Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. 
This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureValue { - /** - * A list of bool type feature value. - */ - boolArrayValue?: Schema$GoogleCloudAiplatformV1beta1BoolArray; - /** - * Bool type feature value. - */ - boolValue?: boolean | null; - /** - * Bytes feature value. - */ - bytesValue?: string | null; - /** - * A list of double type feature value. - */ - doubleArrayValue?: Schema$GoogleCloudAiplatformV1beta1DoubleArray; - /** - * Double type feature value. - */ - doubleValue?: number | null; - /** - * A list of int64 type feature value. - */ - int64ArrayValue?: Schema$GoogleCloudAiplatformV1beta1Int64Array; + export interface Schema$GoogleCloudAiplatformV1beta1FunctionDeclaration { /** - * Int64 feature value. + * Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. */ - int64Value?: string | null; + description?: string | null; /** - * Metadata of feature value. + * Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64. */ - metadata?: Schema$GoogleCloudAiplatformV1beta1FeatureValueMetadata; + name?: string | null; /** - * A list of string type feature value. + * Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. 
Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1 */ - stringArrayValue?: Schema$GoogleCloudAiplatformV1beta1StringArray; + parameters?: Schema$GoogleCloudAiplatformV1beta1Schema; /** - * String feature value. + * Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. */ - stringValue?: string | null; + response?: Schema$GoogleCloudAiplatformV1beta1Schema; } /** - * A destination location for Feature values and format. + * The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureValueDestination { - /** - * Output in BigQuery format. BigQueryDestination.output_uri in FeatureValueDestination.bigquery_destination must refer to a table. - */ - bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; + export interface Schema$GoogleCloudAiplatformV1beta1FunctionResponse { /** - * Output in CSV format. Array Feature value types are not allowed in CSV format. + * Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. */ - csvDestination?: Schema$GoogleCloudAiplatformV1beta1CsvDestination; + name?: string | null; /** - * Output in TFRecord format. 
Below are the mapping from Feature value type in Featurestore to Feature value type in TFRecord: Value type in Featurestore | Value type in TFRecord DOUBLE, DOUBLE_ARRAY | FLOAT_LIST INT64, INT64_ARRAY | INT64_LIST STRING, STRING_ARRAY, BYTES | BYTES_LIST true -\> byte_string("true"), false -\> byte_string("false") BOOL, BOOL_ARRAY (true, false) | BYTES_LIST + * Required. The function response in JSON object format. */ - tfrecordDestination?: Schema$GoogleCloudAiplatformV1beta1TFRecordDestination; + response?: {[key: string]: any} | null; } /** - * Container for list of values. + * The Google Cloud Storage location where the output is to be written to. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureValueList { + export interface Schema$GoogleCloudAiplatformV1beta1GcsDestination { /** - * A list of feature values. All of them should be the same data type. + * Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. */ - values?: Schema$GoogleCloudAiplatformV1beta1FeatureValue[]; + outputUriPrefix?: string | null; } /** - * Metadata of feature value. + * The Google Cloud Storage location for the input content. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureValueMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1GcsSource { /** - * Feature generation timestamp. Typically, it is provided by user at feature ingestion time. If not, feature store will use the system timestamp when the data is ingested into feature store. For streaming ingestion, the time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future. + * Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. 
*/ - generateTime?: string | null; + uris?: string[] | null; } /** - * FeatureView is representation of values that the FeatureOnlineStore will serve based on its syncConfig. + * Request message for NotebookInternalService.GenerateAccessToken. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureView { + export interface Schema$GoogleCloudAiplatformV1beta1GenerateAccessTokenRequest { /** - * Optional. Configures how data is supposed to be extracted from a BigQuery source to be loaded onto the FeatureOnlineStore. + * Required. The VM identity token (a JWT) for authenticating the VM. https://cloud.google.com/compute/docs/instances/verifying-instance-identity */ - bigQuerySource?: Schema$GoogleCloudAiplatformV1beta1FeatureViewBigQuerySource; + vmToken?: string | null; + } + /** + * Response message for NotebookInternalService.GenerateToken. + */ + export interface Schema$GoogleCloudAiplatformV1beta1GenerateAccessTokenResponse { /** - * Output only. Timestamp when this FeatureView was created. + * Short-lived access token string which may be used to access Google APIs. */ - createTime?: string | null; + accessToken?: string | null; /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The time in seconds when the access token expires. Typically that's 3600. */ - etag?: string | null; + expiresIn?: number | null; /** - * Optional. Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore. + * Space-separated list of scopes contained in the returned token. https://cloud.google.com/docs/authentication/token-types#access-contents */ - featureRegistrySource?: Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySource; + scope?: string | null; /** - * Optional. Configuration for index preparation for vector search. 
It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + * Type of the returned access token (e.g. "Bearer"). It specifies how the token must be used. Bearer tokens may be used by any entity without proof of identity. */ - indexConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfig; + tokenType?: string | null; + } + /** + * Request message for [PredictionService.GenerateContent]. + */ + export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentRequest { /** - * Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. */ - labels?: {[key: string]: string} | null; + contents?: Schema$GoogleCloudAiplatformV1beta1Content[]; /** - * Identifier. Name of the FeatureView. Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}` + * Optional. Generation config. */ - name?: string | null; + generationConfig?: Schema$GoogleCloudAiplatformV1beta1GenerationConfig; /** - * Output only. A Service Account unique to this FeatureView. 
The role bigquery.dataViewer should be granted to this service account to allow Vertex AI Feature Store to sync data to the online store. + * Optional. Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates. */ - serviceAccountEmail?: string | null; + safetySettings?: Schema$GoogleCloudAiplatformV1beta1SafetySetting[]; /** - * Optional. Service agent type used during data sync. By default, the Vertex AI Service Agent is used. When using an IAM Policy to isolate this FeatureView within a project, a separate service account should be provisioned by setting this field to `SERVICE_AGENT_TYPE_FEATURE_VIEW`. This will generate a separate service account to access the BigQuery source table. + * Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. */ - serviceAgentType?: string | null; + systemInstruction?: Schema$GoogleCloudAiplatformV1beta1Content; /** - * Configures when data is to be synced/updated for this FeatureView. At the end of the sync the latest featureValues for each entityId of this FeatureView are made ready for online serving. + * Optional. Tool config. This config is shared for all tools provided in the request. */ - syncConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncConfig; + toolConfig?: Schema$GoogleCloudAiplatformV1beta1ToolConfig; /** - * Output only. Timestamp when this FeatureView was last updated. + * Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. */ - updateTime?: string | null; + tools?: Schema$GoogleCloudAiplatformV1beta1Tool[]; + } + /** + * Response message for [PredictionService.GenerateContent]. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentResponse { /** - * Optional. Deprecated: please use FeatureView.index_config instead. + * Output only. Generated candidates. */ - vectorSearchConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfig; - } - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewBigQuerySource { + candidates?: Schema$GoogleCloudAiplatformV1beta1Candidate[]; /** - * Required. Columns to construct entity_id / row keys. + * Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */ - entityIdColumns?: string[] | null; + promptFeedback?: Schema$GoogleCloudAiplatformV1beta1GenerateContentResponsePromptFeedback; /** - * Required. The BigQuery view URI that will be materialized on each sync trigger based on FeatureView.SyncConfig. + * Usage metadata about the response(s). */ - uri?: string | null; + usageMetadata?: Schema$GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata; } /** - * Lookup key for a feature view. + * Content filter results for a prompt sent in the request. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey { + export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentResponsePromptFeedback { /** - * The actual Entity ID will be composed from this struct. This should match with the way ID is defined in the FeatureView spec. + * Output only. Blocked reason. */ - compositeKey?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKeyCompositeKey; + blockReason?: string | null; /** - * String key to use for lookup. + * Output only. A readable block reason message. */ - key?: string | null; - } - /** - * ID that is comprised from several parts (columns). 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKeyCompositeKey { + blockReasonMessage?: string | null; /** - * Parts to construct Entity ID. Should match with the same ID columns as defined in FeatureView in the same order. + * Output only. Safety ratings. */ - parts?: string[] | null; + safetyRatings?: Schema$GoogleCloudAiplatformV1beta1SafetyRating[]; } /** - * A Feature Registry source for features that need to be synced to Online Store. + * Usage metadata about response(s). */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySource { + export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata { /** - * Required. List of features that need to be synced to Online Store. + * Number of tokens in the response(s). */ - featureGroups?: Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySourceFeatureGroup[]; + candidatesTokenCount?: number | null; /** - * Optional. The project number of the parent project of the Feature Groups. + * Number of tokens in the request. */ - projectNumber?: string | null; + promptTokenCount?: number | null; + totalTokenCount?: number | null; } /** - * Features belonging to a single feature group that will be synced to Online Store. + * Generation config. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySourceFeatureGroup { + export interface Schema$GoogleCloudAiplatformV1beta1GenerationConfig { /** - * Required. Identifier of the feature group. + * Optional. Number of candidates to generate. */ - featureGroupId?: string | null; + candidateCount?: number | null; /** - * Required. Identifiers of features under the feature group. + * Optional. Frequency penalties. */ - featureIds?: string[] | null; - } - /** - * Configuration for vector indexing. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfig { + frequencyPenalty?: number | null; /** - * Optional. 
Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + * Optional. The maximum number of output tokens to generate per message. */ - bruteForceConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig; + maxOutputTokens?: number | null; /** - * Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + * Optional. Positive penalties. */ - crowdingColumn?: string | null; + presencePenalty?: number | null; /** - * Optional. The distance measure used in nearest neighbor search. + * Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */ - distanceMeasureType?: string | null; + responseMimeType?: string | null; /** - * Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + * Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED */ - embeddingColumn?: string | null; + responseStyle?: string | null; /** - * Optional. The number of dimensions of the input embedding. + * Optional. Stop sequences. */ - embeddingDimension?: number | null; + stopSequences?: string[] | null; /** - * Optional. 
Columns of features that're used to filter vector search results. + * Optional. Controls the randomness of predictions. */ - filterColumns?: string[] | null; + temperature?: number | null; /** - * Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + * Optional. If specified, top-k sampling will be used. */ - treeAhConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig; - } - /** - * Configuration options for using brute force search. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig {} - /** - * Configuration options for the tree-AH algorithm. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig { + topK?: number | null; /** - * Optional. Number of embeddings on each leaf node. The default value is 1000 if not set. + * Optional. If specified, nucleus sampling will be used. */ - leafNodeEmbeddingCount?: string | null; + topP?: number | null; } /** - * FeatureViewSync is a representation of sync operation which copies data from data source to Feature View in Online Store. + * Generic Metadata shared by all operations. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewSync { + export interface Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata { /** - * Output only. Time when this FeatureViewSync is created. Creation of a FeatureViewSync means that the job is pending / waiting for sufficient resources but may not have started the actual data transfer yet. + * Output only. Time when the operation was created. */ createTime?: string | null; /** - * Output only. Final status of the FeatureViewSync. - */ - finalStatus?: Schema$GoogleRpcStatus; - /** - * Identifier. Name of the FeatureViewSync. 
Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}/featureViewSyncs/{feature_view_sync\}` - */ - name?: string | null; - /** - * Output only. Time when this FeatureViewSync is finished. + * Output only. Partial failures encountered. E.g. single files that couldn't be read. This field should never exceed 20 entries. Status details field will contain standard Google Cloud error details. */ - runTime?: Schema$GoogleTypeInterval; + partialFailures?: Schema$GoogleRpcStatus[]; /** - * Output only. Summary of the sync job. + * Output only. Time when the operation was updated for the last time. If the operation has finished (successfully or not), this is the finish time. */ - syncSummary?: Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncSyncSummary; + updateTime?: string | null; } /** - * Configuration for Sync. Only one option is set. + * Contains information about the source of the models generated from Generative AI Studio. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncConfig { + export interface Schema$GoogleCloudAiplatformV1beta1GenieSource { /** - * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE\}" or "TZ=${IANA_TIME_ZONE\}". The ${IANA_TIME_ZONE\} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". + * Required. The public base model URI. */ - cron?: string | null; + baseModelUri?: string | null; } /** - * Summary from the Sync job. For continuous syncs, the summary is updated periodically. For batch syncs, it gets updated on completion of the sync. + * The Google Drive location for the input content. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewSyncSyncSummary { - /** - * Output only. Total number of rows synced. 
- */ - rowSynced?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1GoogleDriveSource { /** - * Output only. BigQuery slot milliseconds consumed for the sync job. + * Required. Google Drive resource IDs. */ - totalSlot?: string | null; + resourceIds?: Schema$GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId[]; } /** - * Deprecated. Use IndexConfig instead. + * The type and ID of the Google Drive resource. */ - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfig { - /** - * Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. - */ - bruteForceConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigBruteForceConfig; - /** - * Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. - */ - crowdingColumn?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId { /** - * Optional. The distance measure used in nearest neighbor search. + * Required. The ID of the Google Drive resource. */ - distanceMeasureType?: string | null; + resourceId?: string | null; /** - * Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + * Required. The type of the Google Drive resource. */ - embeddingColumn?: string | null; + resourceType?: string | null; + } + /** + * Input for groundedness metric. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1GroundednessInput { /** - * Optional. The number of dimensions of the input embedding. + * Required. Groundedness instance. */ - embeddingDimension?: number | null; + instance?: Schema$GoogleCloudAiplatformV1beta1GroundednessInstance; /** - * Optional. Columns of features that're used to filter vector search results. + * Required. Spec for groundedness metric. */ - filterColumns?: string[] | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1GroundednessSpec; + } + /** + * Spec for groundedness instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1GroundednessInstance { /** - * Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + * Required. Background information provided in context used to compare against the prediction. */ - treeAhConfig?: Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigTreeAHConfig; - } - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigBruteForceConfig {} - export interface Schema$GoogleCloudAiplatformV1beta1FeatureViewVectorSearchConfigTreeAHConfig { + context?: string | null; /** - * Optional. Number of embeddings on each leaf node. The default value is 1000 if not set. + * Required. Output of the evaluated model. */ - leafNodeEmbeddingCount?: string | null; + prediction?: string | null; } /** - * Request message for FeatureOnlineStoreService.FetchFeatureValues. All the features under the requested feature view will be returned. + * Spec for groundedness result. */ - export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1GroundednessResult { /** - * Optional. Response data format. If not set, FeatureViewDataFormat.KEY_VALUE will be used. + * Output only. Confidence for groundedness score. 
*/ - dataFormat?: string | null; + confidence?: number | null; /** - * Optional. The request key to fetch feature values for. + * Output only. Explanation for groundedness score. */ - dataKey?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey; + explanation?: string | null; /** - * Specify response data format. If not set, KeyValue format will be used. Deprecated. Use FetchFeatureValuesRequest.data_format. + * Output only. Groundedness score. */ - format?: string | null; + score?: number | null; + } + /** + * Spec for groundedness metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1GroundednessSpec { /** - * Simple ID. The whole string will be used as is to identify Entity to fetch feature values for. + * Optional. Which version to use for evaluation. */ - id?: string | null; + version?: number | null; } /** - * Response message for FeatureOnlineStoreService.FetchFeatureValues + * Metadata returned to client when grounding is enabled. */ - export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1GroundingMetadata { /** - * The data key associated with this response. Will only be populated for FeatureOnlineStoreService.StreamingFetchFeatureValues RPCs. + * Optional. Queries executed by the retrieval tools. */ - dataKey?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey; + retrievalQueries?: string[] | null; /** - * Feature values in KeyValue format. + * Optional. Google search entry for the following-up web searches. */ - keyValues?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairList; + searchEntryPoint?: Schema$GoogleCloudAiplatformV1beta1SearchEntryPoint; /** - * Feature values in proto Struct format. + * Optional. Web search queries for the following-up web search. 
*/ - protoStruct?: {[key: string]: any} | null; + webSearchQueries?: string[] | null; } /** - * Response structure in the format of key (feature name) and (feature) value pair. + * Represents a HyperparameterTuningJob. A HyperparameterTuningJob has a Study specification and multiple CustomJobs with identical CustomJob specification. */ - export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairList { + export interface Schema$GoogleCloudAiplatformV1beta1HyperparameterTuningJob { /** - * List of feature names and values. + * Output only. Time when the HyperparameterTuningJob was created. */ - features?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair[]; - } - /** - * Feature name & value pair. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { + createTime?: string | null; /** - * Feature short name. + * Required. The display name of the HyperparameterTuningJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - name?: string | null; + displayName?: string | null; /** - * Feature value. + * Customer-managed encryption key options for a HyperparameterTuningJob. If this is set, then all resources created by the HyperparameterTuningJob will be encrypted with the provided encryption key. */ - value?: Schema$GoogleCloudAiplatformV1beta1FeatureValue; - } - /** - * URI based data. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FileData { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Required. URI. + * Output only. Time when the HyperparameterTuningJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ - fileUri?: string | null; + endTime?: string | null; /** - * Required. The IANA standard MIME type of the source data. + * Output only. 
Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. */ - mimeType?: string | null; - } - /** - * Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FilterSplit { + error?: Schema$GoogleRpcStatus; /** - * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + * The labels with user-defined metadata to organize HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - testFilter?: string | null; + labels?: {[key: string]: string} | null; /** - * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + * The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. 
*/ - trainingFilter?: string | null; + maxFailedTrialCount?: number | null; /** - * Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. + * Required. The desired total number of Trials. */ - validationFilter?: string | null; - } - /** - * The request message for MatchService.FindNeighbors. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequest { + maxTrialCount?: number | null; /** - * The ID of the DeployedIndex that will serve the request. This request is sent to a specific IndexEndpoint, as per the IndexEndpoint.network. That IndexEndpoint also has IndexEndpoint.deployed_indexes, and each such index has a DeployedIndex.id field. The value of the field below must equal one of the DeployedIndex.id fields of the IndexEndpoint that is being called for this request. + * Output only. Resource name of the HyperparameterTuningJob. */ - deployedIndexId?: string | null; + name?: string | null; /** - * The list of queries. + * Required. The desired number of Trials to run in parallel. */ - queries?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequestQuery[]; + parallelTrialCount?: number | null; /** - * If set to true, the full datapoints (including all vector values and restricts) of the nearest neighbors are returned. Note that returning full datapoint will significantly increase the latency and cost of the query. + * Output only. Time when the HyperparameterTuningJob for the first time entered the `JOB_STATE_RUNNING` state. */ - returnFullDatapoint?: boolean | null; - } - /** - * A query to find a number of the nearest neighbors (most similar vectors) of a vector. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsRequestQuery { + startTime?: string | null; /** - * The number of neighbors to find via approximate search before exact reordering is performed. If not set, the default value from scam config is used; if set, this value must be \> 0. + * Output only. The detailed state of the job. */ - approximateNeighborCount?: number | null; + state?: string | null; /** - * Required. The datapoint/vector whose nearest neighbors should be searched for. + * Required. Study configuration of the HyperparameterTuningJob. */ - datapoint?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint; + studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; /** - * The fraction of the number of leaves to search, set at query time allows user to tune search performance. This value increase result in both search accuracy and latency increase. The value should be between 0.0 and 1.0. If not set or set to 0.0, query uses the default value specified in NearestNeighborSearchConfig.TreeAHConfig.fraction_leaf_nodes_to_search. + * Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. */ - fractionLeafNodesToSearchOverride?: number | null; + trialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; /** - * The number of nearest neighbors to be retrieved from database for each query. If not set, will use the default from the service configuration (https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes#nearest-neighbor-search-config). + * Output only. Trials of the HyperparameterTuningJob. */ - neighborCount?: number | null; + trials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; /** - * Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. 
This field is the maximum number of matches with the same crowding tag. + * Output only. Time when the HyperparameterTuningJob was most recently updated. */ - perCrowdingAttributeNeighborCount?: number | null; + updateTime?: string | null; } /** - * The response message for MatchService.FindNeighbors. + * Matcher for Features of an EntityType by Feature ID. */ - export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1IdMatcher { /** - * The nearest neighbors of the query datapoints. + * Required. The following are accepted as `ids`: * A single-element list containing only `*`, which selects all Features in the target EntityType, or * A list containing only Feature IDs, which selects only Features with those IDs in the target EntityType. */ - nearestNeighbors?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNearestNeighbors[]; + ids?: string[] | null; } /** - * Nearest neighbors for one query. + * Describes the location from where we import data into a Dataset, together with the labels that will be applied to the DataItems and the Annotations. */ - export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNearestNeighbors { + export interface Schema$GoogleCloudAiplatformV1beta1ImportDataConfig { /** - * The ID of the query datapoint. + * Labels that will be applied to newly imported Annotations. If two Annotations are identical, one of them will be deduped. Two Annotations are considered identical if their payload, payload_schema_uri and all of their labels are the same. These labels will be overridden by Annotation labels specified inside index file referenced by import_schema_uri, e.g. jsonl file. */ - id?: string | null; + annotationLabels?: {[key: string]: string} | null; /** - * All its neighbors. + * Labels that will be applied to newly imported DataItems. 
If an identical DataItem as one being imported already exists in the Dataset, then these labels will be appended to these of the already existing one, and if labels with identical key is imported before, the old label value will be overwritten. If two DataItems are identical in the same import data operation, the labels will be combined and if key collision happens in this case, one of the values will be picked randomly. Two DataItems are considered identical if their content bytes are identical (e.g. image bytes or pdf bytes). These labels will be overridden by Annotation labels specified inside index file referenced by import_schema_uri, e.g. jsonl file. */ - neighbors?: Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor[]; - } - /** - * A neighbor of the query vector. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FindNeighborsResponseNeighbor { + dataItemLabels?: {[key: string]: string} | null; /** - * The datapoint of the neighbor. Note that full datapoints are returned only when "return_full_datapoint" is set to true. Otherwise, only the "datapoint_id" and "crowding_tag" fields are populated. + * The Google Cloud Storage location for the input content. */ - datapoint?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; /** - * The distance between the neighbor and the dense embedding query. + * Required. Points to a YAML file stored on Google Cloud Storage describing the import format. Validation will be done against the schema. The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ - distance?: number | null; + importSchemaUri?: string | null; } /** - * Input for fluency metric. + * Runtime operation information for DatasetService.ImportData. */ - export interface Schema$GoogleCloudAiplatformV1beta1FluencyInput { - /** - * Required. Fluency instance. 
- */ - instance?: Schema$GoogleCloudAiplatformV1beta1FluencyInstance; + export interface Schema$GoogleCloudAiplatformV1beta1ImportDataOperationMetadata { /** - * Required. Spec for fluency score metric. + * The common part of the operation metadata. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1FluencySpec; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Spec for fluency instance. + * Request message for DatasetService.ImportData. */ - export interface Schema$GoogleCloudAiplatformV1beta1FluencyInstance { + export interface Schema$GoogleCloudAiplatformV1beta1ImportDataRequest { /** - * Required. Output of the evaluated model. + * Required. The desired input locations. The contents of all input locations will be imported in one batch. */ - prediction?: string | null; + importConfigs?: Schema$GoogleCloudAiplatformV1beta1ImportDataConfig[]; } /** - * Spec for fluency result. + * Response message for DatasetService.ImportData. */ - export interface Schema$GoogleCloudAiplatformV1beta1FluencyResult { - /** - * Output only. Confidence for fluency score. - */ - confidence?: number | null; - /** - * Output only. Explanation for fluency score. - */ - explanation?: string | null; - /** - * Output only. Fluency score. - */ - score?: number | null; - } + export interface Schema$GoogleCloudAiplatformV1beta1ImportDataResponse {} /** - * Spec for fluency score metric. + * Details of ExtensionRegistryService.ImportExtension operation. */ - export interface Schema$GoogleCloudAiplatformV1beta1FluencySpec { + export interface Schema$GoogleCloudAiplatformV1beta1ImportExtensionOperationMetadata { /** - * Optional. Which version to use for evaluation. + * The common part of the operation metadata. */ - version?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Assigns the input data to training, validation, and test sets as per the given fractions. 
Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. + * Details of operations that perform import Feature values. */ - export interface Schema$GoogleCloudAiplatformV1beta1FractionSplit { + export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesOperationMetadata { /** - * The fraction of the input data that is to be used to evaluate the Model. + * List of ImportFeatureValues operations running under a single EntityType that are blocking this operation. */ - testFraction?: number | null; + blockingOperationIds?: string[] | null; /** - * The fraction of the input data that is to be used to train the Model. + * Operation metadata for Featurestore import Feature values. */ - trainingFraction?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * The fraction of the input data that is to be used to validate the Model. + * Number of entities that have been imported by the operation. */ - validationFraction?: number | null; - } - /** - * Input for fulfillment metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentInput { + importedEntityCount?: string | null; /** - * Required. Fulfillment instance. + * Number of Feature values that have been imported by the operation. */ - instance?: Schema$GoogleCloudAiplatformV1beta1FulfillmentInstance; + importedFeatureValueCount?: string | null; /** - * Required. Spec for fulfillment score metric. + * The number of rows in input source that weren't imported due to either * Not having any featureValues. * Having a null entityId. * Having a null timestamp. * Not being parsable (applicable for CSV sources). 
*/ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1FulfillmentSpec; - } - /** - * Spec for fulfillment instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentInstance { + invalidRowCount?: string | null; /** - * Required. Inference instruction prompt to compare prediction with. + * The source URI from where Feature values are imported. */ - instruction?: string | null; + sourceUris?: string[] | null; /** - * Required. Output of the evaluated model. + * The number rows that weren't ingested due to having timestamps outside the retention boundary. */ - prediction?: string | null; + timestampOutsideRetentionRowsCount?: string | null; } /** - * Spec for fulfillment result. + * Request message for FeaturestoreService.ImportFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentResult { + export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesRequest { + avroSource?: Schema$GoogleCloudAiplatformV1beta1AvroSource; + bigquerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; + csvSource?: Schema$GoogleCloudAiplatformV1beta1CsvSource; /** - * Output only. Confidence for fulfillment score. + * If true, API doesn't start ingestion analysis pipeline. */ - confidence?: number | null; + disableIngestionAnalysis?: boolean | null; /** - * Output only. Explanation for fulfillment score. + * If set, data will not be imported for online serving. This is typically used for backfilling, where Feature generation timestamps are not in the timestamp range needed for online serving. */ - explanation?: string | null; + disableOnlineServing?: boolean | null; /** - * Output only. Fulfillment score. + * Source column that holds entity IDs. If not provided, entity IDs are extracted from the column named entity_id. */ - score?: number | null; - } - /** - * Spec for fulfillment metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FulfillmentSpec { + entityIdField?: string | null; /** - * Optional. 
Which version to use for evaluation. + * Required. Specifications defining which Feature values to import from the entity. The request fails if no feature_specs are provided, and having multiple feature_specs for one Feature is not allowed. */ - version?: number | null; - } - /** - * A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. - */ - export interface Schema$GoogleCloudAiplatformV1beta1FunctionCall { + featureSpecs?: Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesRequestFeatureSpec[]; /** - * Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + * Single Feature timestamp for all entities being imported. The timestamp must not have higher than millisecond precision. */ - args?: {[key: string]: any} | null; + featureTime?: string | null; /** - * Required. The name of the function to call. Matches [FunctionDeclaration.name]. + * Source column that holds the Feature timestamp for all Feature values in each entity. */ - name?: string | null; + featureTimeField?: string | null; + /** + * Specifies the number of workers that are used to write data to the Featurestore. Consider the online serving capacity that you require to achieve the desired import throughput without interfering with online serving. The value must be positive, and less than or equal to 100. If not set, defaults to using 1 worker. The low count ensures minimal impact on online serving performance. + */ + workerCount?: number | null; } /** - * Function calling config. + * Defines the Feature value(s) to import. */ - export interface Schema$GoogleCloudAiplatformV1beta1FunctionCallingConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesRequestFeatureSpec { /** - * Optional. Function names to call. Only set when the Mode is ANY. 
Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + * Required. ID of the Feature to import values of. This Feature must exist in the target EntityType, or the request will fail. */ - allowedFunctionNames?: string[] | null; + id?: string | null; /** - * Optional. Function calling mode. + * Source column to get the Feature values from. If not set, uses the column with the same name as the Feature ID. */ - mode?: string | null; + sourceField?: string | null; } /** - * Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client. + * Response message for FeaturestoreService.ImportFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1beta1FunctionDeclaration { + export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesResponse { /** - * Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. + * Number of entities that have been imported by the operation. */ - description?: string | null; + importedEntityCount?: string | null; /** - * Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64. + * Number of Feature values that have been imported by the operation. */ - name?: string | null; + importedFeatureValueCount?: string | null; /** - * Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. 
Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1 + * The number of rows in input source that weren't imported due to either * Not having any featureValues. * Having a null entityId. * Having a null timestamp. * Not being parsable (applicable for CSV sources). */ - parameters?: Schema$GoogleCloudAiplatformV1beta1Schema; + invalidRowCount?: string | null; /** - * Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + * The number rows that weren't ingested due to having feature timestamps outside the retention boundary. */ - response?: Schema$GoogleCloudAiplatformV1beta1Schema; + timestampOutsideRetentionRowsCount?: string | null; } /** - * The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. + * Request message for ModelService.ImportModelEvaluation */ - export interface Schema$GoogleCloudAiplatformV1beta1FunctionResponse { - /** - * Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. - */ - name?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ImportModelEvaluationRequest { /** - * Required. The function response in JSON object format. + * Required. Model evaluation resource to be imported. 
*/ - response?: {[key: string]: any} | null; + modelEvaluation?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluation; } /** - * The Google Cloud Storage location where the output is to be written to. + * Config for importing RagFiles. */ - export interface Schema$GoogleCloudAiplatformV1beta1GcsDestination { + export interface Schema$GoogleCloudAiplatformV1beta1ImportRagFilesConfig { /** - * Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist. + * Google Cloud Storage location. Supports importing individual files as well as entire Google Cloud Storage directories. Sample formats: - `gs://bucket_name/my_directory/object_name/my_file.txt` - `gs://bucket_name/my_directory` */ - outputUriPrefix?: string | null; - } - /** - * The Google Cloud Storage location for the input content. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GcsSource { + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; /** - * Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + * Google Drive location. Supports importing individual files as well as Google Drive folders. */ - uris?: string[] | null; + googleDriveSource?: Schema$GoogleCloudAiplatformV1beta1GoogleDriveSource; + /** + * Specifies the size and overlap of chunks after importing RagFiles. + */ + ragFileChunkingConfig?: Schema$GoogleCloudAiplatformV1beta1RagFileChunkingConfig; } /** - * Request message for NotebookInternalService.GenerateAccessToken. + * Request message for VertexRagDataService.ImportRagFiles. */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerateAccessTokenRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ImportRagFilesRequest { /** - * Required. The VM identity token (a JWT) for authenticating the VM. 
https://cloud.google.com/compute/docs/instances/verifying-instance-identity + * Required. The config for the RagFiles to be synced and imported into the RagCorpus. VertexRagDataService.ImportRagFiles. */ - vmToken?: string | null; + importRagFilesConfig?: Schema$GoogleCloudAiplatformV1beta1ImportRagFilesConfig; } /** - * Response message for NotebookInternalService.GenerateToken. + * A representation of a collection of database items organized in a way that allows for approximate nearest neighbor (a.k.a ANN) algorithms search. */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerateAccessTokenResponse { + export interface Schema$GoogleCloudAiplatformV1beta1Index { /** - * Short-lived access token string which may be used to access Google APIs. + * Output only. Timestamp when this Index was created. */ - accessToken?: string | null; + createTime?: string | null; /** - * The time in seconds when the access token expires. Typically that's 3600. + * Output only. The pointers to DeployedIndexes created from this Index. An Index can be only deleted if all its DeployedIndexes had been undeployed first. */ - expiresIn?: number | null; + deployedIndexes?: Schema$GoogleCloudAiplatformV1beta1DeployedIndexRef[]; /** - * Space-separated list of scopes contained in the returned token. https://cloud.google.com/docs/authentication/token-types#access-contents + * The description of the Index. */ - scope?: string | null; + description?: string | null; /** - * Type of the returned access token (e.g. "Bearer"). It specifies how the token must be used. Bearer tokens may be used by any entity without proof of identity. + * Required. The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - tokenType?: string | null; - } - /** - * Request message for [PredictionService.GenerateContent]. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentRequest { + displayName?: string | null; /** - * Required. 
The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. + * Immutable. Customer-managed encryption key spec for an Index. If set, this Index and all sub-resources of this Index will be secured by this key. */ - contents?: Schema$GoogleCloudAiplatformV1beta1Content[]; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Optional. Generation config. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - generationConfig?: Schema$GoogleCloudAiplatformV1beta1GenerationConfig; + etag?: string | null; /** - * Optional. Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates. + * Output only. Stats of the index resource. */ - safetySettings?: Schema$GoogleCloudAiplatformV1beta1SafetySetting[]; + indexStats?: Schema$GoogleCloudAiplatformV1beta1IndexStats; /** - * Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + * Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. */ - systemInstruction?: Schema$GoogleCloudAiplatformV1beta1Content; + indexUpdateMethod?: string | null; /** - * Optional. Tool config. This config is shared for all tools provided in the request. + * The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - toolConfig?: Schema$GoogleCloudAiplatformV1beta1ToolConfig; + labels?: {[key: string]: string} | null; /** - * Optional. 
A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. + * An additional information about the Index; the schema of the metadata can be found in metadata_schema. */ - tools?: Schema$GoogleCloudAiplatformV1beta1Tool[]; - } - /** - * Response message for [PredictionService.GenerateContent]. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentResponse { + metadata?: any | null; /** - * Output only. Generated candidates. + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Index, that is specific to it. Unset if the Index does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - candidates?: Schema$GoogleCloudAiplatformV1beta1Candidate[]; + metadataSchemaUri?: string | null; /** - * Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. + * Output only. The resource name of the Index. */ - promptFeedback?: Schema$GoogleCloudAiplatformV1beta1GenerateContentResponsePromptFeedback; + name?: string | null; /** - * Usage metadata about the response(s). + * Output only. Timestamp when this Index was most recently updated. This also includes any update to the contents of the Index. 
Note that Operations working on this Index may have their Operations.metadata.generic_metadata.update_time a little after the value of this timestamp, yet that does not mean their results are not already reflected in the Index. Result of any successfully completed Operation on the Index is reflected in it. */ - usageMetadata?: Schema$GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata; + updateTime?: string | null; } /** - * Content filter results for a prompt sent in the request. + * A datapoint of Index. */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentResponsePromptFeedback { + export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapoint { /** - * Output only. Blocked reason. + * Optional. CrowdingTag of the datapoint, the number of neighbors to return in each crowding can be configured during query. */ - blockReason?: string | null; + crowdingTag?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointCrowdingTag; /** - * Output only. A readable block reason message. + * Required. Unique identifier of the datapoint. */ - blockReasonMessage?: string | null; + datapointId?: string | null; /** - * Output only. Safety ratings. + * Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. */ - safetyRatings?: Schema$GoogleCloudAiplatformV1beta1SafetyRating[]; + featureVector?: number[] | null; + /** + * Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. + */ + numericRestricts?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointNumericRestriction[]; + /** + * Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. 
See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering + */ + restricts?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointRestriction[]; + /** + * Optional. Feature embedding vector for sparse index. + */ + sparseEmbedding?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding; } /** - * Usage metadata about response(s). + * Crowding tag is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata { - /** - * Number of tokens in the response(s). - */ - candidatesTokenCount?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointCrowdingTag { /** - * Number of tokens in the request. + * The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. */ - promptTokenCount?: number | null; - totalTokenCount?: number | null; + crowdingAttribute?: string | null; } /** - * Generation config. + * This field allows restricts to be based on numeric comparisons rather than categorical tokens. */ - export interface Schema$GoogleCloudAiplatformV1beta1GenerationConfig { + export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointNumericRestriction { /** - * Optional. Number of candidates to generate. + * The namespace of this restriction. e.g.: cost. */ - candidateCount?: number | null; + namespace?: string | null; /** - * Optional. Frequency penalties. + * This MUST be specified for queries and must NOT be specified for datapoints. */ - frequencyPenalty?: number | null; + op?: string | null; /** - * Optional. 
The maximum number of output tokens to generate per message. + * Represents 64 bit float. */ - maxOutputTokens?: number | null; + valueDouble?: number | null; /** - * Optional. Positive penalties. + * Represents 32 bit float. */ - presencePenalty?: number | null; + valueFloat?: number | null; /** - * Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. + * Represents 64 bit integer. */ - responseMimeType?: string | null; + valueInt?: string | null; + } + /** + * Restriction of a datapoint which describe its attributes(tokens) from each of several attribute categories(namespaces). + */ + export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointRestriction { /** - * Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED + * The attributes to allow in this namespace. e.g.: 'red' */ - responseStyle?: string | null; + allowList?: string[] | null; /** - * Optional. Stop sequences. + * The attributes to deny in this namespace. e.g.: 'blue' */ - stopSequences?: string[] | null; + denyList?: string[] | null; /** - * Optional. Controls the randomness of predictions. + * The namespace of this restriction. e.g.: color. */ - temperature?: number | null; + namespace?: string | null; + } + /** + * Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. + */ + export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding { /** - * Optional. If specified, top-k sampling will be used. + * Required. The list of indexes for the embedding values of the sparse vector. */ - topK?: number | null; + dimensions?: string[] | null; /** - * Optional. If specified, nucleus sampling will be used. 
+ * Required. The list of embedding values of the sparse vector. */ - topP?: number | null; + values?: number[] | null; } /** - * Generic Metadata shared by all operations. + * Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes. */ - export interface Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1IndexEndpoint { /** - * Output only. Time when the operation was created. + * Output only. Timestamp when this IndexEndpoint was created. */ createTime?: string | null; /** - * Output only. Partial failures encountered. E.g. single files that couldn't be read. This field should never exceed 20 entries. Status details field will contain standard Google Cloud error details. + * Output only. The indexes deployed in this endpoint. */ - partialFailures?: Schema$GoogleRpcStatus[]; + deployedIndexes?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex[]; /** - * Output only. Time when the operation was updated for the last time. If the operation has finished (successfully or not), this is the finish time. + * The description of the IndexEndpoint. */ - updateTime?: string | null; - } - /** - * Contains information about the source of the models generated from Generative AI Studio. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GenieSource { + description?: string | null; /** - * Required. The public base model URI. + * Required. The display name of the IndexEndpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - baseModelUri?: string | null; - } - /** - * The Google Drive location for the input content. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GoogleDriveSource { + displayName?: string | null; /** - * Required. Google Drive resource IDs. + * Optional. Deprecated: If true, expose the IndexEndpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. 
*/ - resourceIds?: Schema$GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId[]; - } - /** - * The type and ID of the Google Drive resource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId { + enablePrivateServiceConnect?: boolean | null; /** - * Required. The ID of the Google Drive resource. + * Immutable. Customer-managed encryption key spec for an IndexEndpoint. If set, this IndexEndpoint and all sub-resources of this IndexEndpoint will be secured by this key. */ - resourceId?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Required. The type of the Google Drive resource. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - resourceType?: string | null; - } - /** - * Input for groundedness metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GroundednessInput { + etag?: string | null; /** - * Required. Groundedness instance. + * The labels with user-defined metadata to organize your IndexEndpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - instance?: Schema$GoogleCloudAiplatformV1beta1GroundednessInstance; + labels?: {[key: string]: string} | null; /** - * Required. Spec for groundedness metric. + * Output only. The resource name of the IndexEndpoint. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1GroundednessSpec; - } - /** - * Spec for groundedness instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GroundednessInstance { + name?: string | null; /** - * Required. Background information provided in context used to compare against the prediction. + * Optional. 
The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the IndexEndpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. network and private_service_connect_config are mutually exclusive. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in '12345', and {network\} is network name. */ - context?: string | null; + network?: string | null; /** - * Required. Output of the evaluated model. + * Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive. */ - prediction?: string | null; - } - /** - * Spec for groundedness result. - */ - export interface Schema$GoogleCloudAiplatformV1beta1GroundednessResult { + privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig; /** - * Output only. Confidence for groundedness score. + * Output only. If public_endpoint_enabled is true, this field will be populated with the domain name to use for this index endpoint. */ - confidence?: number | null; + publicEndpointDomainName?: string | null; /** - * Output only. Explanation for groundedness score. + * Optional. If true, the deployed index will be accessible through public endpoint. */ - explanation?: string | null; + publicEndpointEnabled?: boolean | null; /** - * Output only. Groundedness score. + * Output only. Timestamp when this IndexEndpoint was last updated. This timestamp is not updated when the endpoint's DeployedIndexes are updated, e.g. due to updates of the original Indexes they are the deployments of. */ - score?: number | null; + updateTime?: string | null; } /** - * Spec for groundedness metric. 
+ * IndexPrivateEndpoints proto is used to provide paths for users to send requests via private endpoints (e.g. private service access, private service connect). To send request via private service access, use match_grpc_address. To send request via private service connect, use service_attachment. */ - export interface Schema$GoogleCloudAiplatformV1beta1GroundednessSpec { + export interface Schema$GoogleCloudAiplatformV1beta1IndexPrivateEndpoints { /** - * Optional. Which version to use for evaluation. + * Output only. The ip address used to send match gRPC requests. */ - version?: number | null; + matchGrpcAddress?: string | null; + /** + * Output only. PscAutomatedEndpoints is populated if private service connect is enabled if PscAutomatedConfig is set. + */ + pscAutomatedEndpoints?: Schema$GoogleCloudAiplatformV1beta1PscAutomatedEndpoints[]; + /** + * Output only. The name of the service attachment resource. Populated if private service connect is enabled. + */ + serviceAttachment?: string | null; } /** - * Metadata returned to client when grounding is enabled. + * Stats of the Index. */ - export interface Schema$GoogleCloudAiplatformV1beta1GroundingMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1IndexStats { /** - * Optional. Queries executed by the retrieval tools. + * Output only. The number of shards in the Index. */ - retrievalQueries?: string[] | null; + shardsCount?: number | null; /** - * Optional. Google search entry for the following-up web searches. + * Output only. The number of sparse vectors in the Index. */ - searchEntryPoint?: Schema$GoogleCloudAiplatformV1beta1SearchEntryPoint; + sparseVectorsCount?: string | null; /** - * Optional. Web search queries for the following-up web search. + * Output only. The number of dense vectors in the Index. */ - webSearchQueries?: string[] | null; + vectorsCount?: string | null; } /** - * Represents a HyperparameterTuningJob. 
A HyperparameterTuningJob has a Study specification and multiple CustomJobs with identical CustomJob specification. + * Specifies Vertex AI owned input data to be used for training, and possibly evaluating, the Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1HyperparameterTuningJob { + export interface Schema$GoogleCloudAiplatformV1beta1InputDataConfig { /** - * Output only. Time when the HyperparameterTuningJob was created. - */ - createTime?: string | null; - /** - * Required. The display name of the HyperparameterTuningJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. - */ - displayName?: string | null; - /** - * Customer-managed encryption key options for a HyperparameterTuningJob. If this is set, then all resources created by the HyperparameterTuningJob will be encrypted with the provided encryption key. - */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; - /** - * Output only. Time when the HyperparameterTuningJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. - */ - endTime?: string | null; - /** - * Output only. Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. + * Applicable only to custom training with Datasets that have DataItems and Annotations. Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. 
When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. */ - error?: Schema$GoogleRpcStatus; + annotationSchemaUri?: string | null; /** - * The labels with user-defined metadata to organize HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Applicable only to Datasets that have DataItems and Annotations. A filter on Annotations of the Dataset. Only Annotations that both match this filter and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on (for the auto-assigned that role is decided by Vertex AI). A filter with same syntax as the one used in ListAnnotations may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. */ - labels?: {[key: string]: string} | null; + annotationsFilter?: string | null; /** - * The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. + * Only applicable to custom training with tabular Dataset with BigQuery source. The BigQuery project location where the training data is to be written to. In the given project a new dataset is created with name `dataset___` where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training input data is written into that dataset. In the dataset three tables are created, `training`, `validation` and `test`. * AIP_DATA_FORMAT = "bigquery". 
* AIP_TRAINING_DATA_URI = "bigquery_destination.dataset___.training" * AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset___.validation" * AIP_TEST_DATA_URI = "bigquery_destination.dataset___.test" */ - maxFailedTrialCount?: number | null; + bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; /** - * Required. The desired total number of Trials. + * Required. The ID of the Dataset in the same Project and Location which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's training_task_definition. For tabular Datasets, all their data is exported to training, to pick and choose from. */ - maxTrialCount?: number | null; + datasetId?: string | null; /** - * Output only. Resource name of the HyperparameterTuningJob. + * Split based on the provided filters for each set. */ - name?: string | null; + filterSplit?: Schema$GoogleCloudAiplatformV1beta1FilterSplit; /** - * Required. The desired number of Trials to run in parallel. + * Split based on fractions defining the size of each set. */ - parallelTrialCount?: number | null; + fractionSplit?: Schema$GoogleCloudAiplatformV1beta1FractionSplit; /** - * Output only. Time when the HyperparameterTuningJob for the first time entered the `JOB_STATE_RUNNING` state. + * The Cloud Storage location where the training data is to be written to. In the given directory a new directory is created with name: `dataset---` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All training input data is written into that directory. The Vertex AI environment variables representing Cloud Storage data URIs are represented in the Cloud Storage wildcard format to support sharded data. 
e.g.: "gs://.../training-*.jsonl" * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data * AIP_TRAINING_DATA_URI = "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT\}" * AIP_VALIDATION_DATA_URI = "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT\}" * AIP_TEST_DATA_URI = "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT\}" */ - startTime?: string | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; /** - * Output only. The detailed state of the job. + * Whether to persist the ML use assignment to data item system labels. */ - state?: string | null; + persistMlUseAssignment?: boolean | null; /** - * Required. Study configuration of the HyperparameterTuningJob. + * Supported only for tabular Datasets. Split based on a predefined key. */ - studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; + predefinedSplit?: Schema$GoogleCloudAiplatformV1beta1PredefinedSplit; /** - * Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. + * Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. */ - trialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; + savedQueryId?: string | null; /** - * Output only. Trials of the HyperparameterTuningJob. + * Supported only for tabular Datasets. Split based on the distribution of the specified column. 
*/ - trials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; + stratifiedSplit?: Schema$GoogleCloudAiplatformV1beta1StratifiedSplit; /** - * Output only. Time when the HyperparameterTuningJob was most recently updated. + * Supported only for tabular Datasets. Split based on the timestamp of the input data pieces. */ - updateTime?: string | null; + timestampSplit?: Schema$GoogleCloudAiplatformV1beta1TimestampSplit; } /** - * Matcher for Features of an EntityType by Feature ID. + * A list of int64 values. */ - export interface Schema$GoogleCloudAiplatformV1beta1IdMatcher { + export interface Schema$GoogleCloudAiplatformV1beta1Int64Array { /** - * Required. The following are accepted as `ids`: * A single-element list containing only `*`, which selects all Features in the target EntityType, or * A list containing only Feature IDs, which selects only Features with those IDs in the target EntityType. + * A list of int64 values. */ - ids?: string[] | null; + values?: string[] | null; } /** - * Describes the location from where we import data into a Dataset, together with the labels that will be applied to the DataItems and the Annotations. + * An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportDataConfig { - /** - * Labels that will be applied to newly imported Annotations. If two Annotations are identical, one of them will be deduped. Two Annotations are considered identical if their payload, payload_schema_uri and all of their labels are the same. These labels will be overridden by Annotation labels specified inside index file referenced by import_schema_uri, e.g. jsonl file. 
- */ - annotationLabels?: {[key: string]: string} | null; + export interface Schema$GoogleCloudAiplatformV1beta1IntegratedGradientsAttribution { /** - * Labels that will be applied to newly imported DataItems. If an identical DataItem as one being imported already exists in the Dataset, then these labels will be appended to these of the already existing one, and if labels with identical key is imported before, the old label value will be overwritten. If two DataItems are identical in the same import data operation, the labels will be combined and if key collision happens in this case, one of the values will be picked randomly. Two DataItems are considered identical if their content bytes are identical (e.g. image bytes or pdf bytes). These labels will be overridden by Annotation labels specified inside index file referenced by import_schema_uri, e.g. jsonl file. + * Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 */ - dataItemLabels?: {[key: string]: string} | null; + blurBaselineConfig?: Schema$GoogleCloudAiplatformV1beta1BlurBaselineConfig; /** - * The Google Cloud Storage location for the input content. + * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + smoothGradConfig?: Schema$GoogleCloudAiplatformV1beta1SmoothGradConfig; /** - * Required. Points to a YAML file stored on Google Cloud Storage describing the import format. Validation will be done against the schema. 
The schema is defined as an [OpenAPI 3.0.2 Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). + * Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. */ - importSchemaUri?: string | null; + stepCount?: number | null; } /** - * Runtime operation information for DatasetService.ImportData. + * Request message for [InternalOsServiceStateInstance]. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportDataOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1InternalOsServiceStateInstance { /** - * The common part of the operation metadata. + * Required. internal service name. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for DatasetService.ImportData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportDataRequest { + serviceName?: string | null; /** - * Required. The desired input locations. The contents of all input locations will be imported in one batch. + * Required. internal service state. */ - importConfigs?: Schema$GoogleCloudAiplatformV1beta1ImportDataConfig[]; + serviceState?: string | null; } /** - * Response message for DatasetService.ImportData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportDataResponse {} - /** - * Details of ExtensionRegistryService.ImportExtension operation. + * Contains information about the Large Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportExtensionOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1LargeModelReference { /** - * The common part of the operation metadata. + * Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". 
Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + name?: string | null; } /** - * Details of operations that perform import Feature values. + * A subgraph of the overall lineage graph. Event edges connect Artifact and Execution nodes. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesOperationMetadata { - /** - * List of ImportFeatureValues operations running under a single EntityType that are blocking this operation. - */ - blockingOperationIds?: string[] | null; - /** - * Operation metadata for Featurestore import Feature values. - */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + export interface Schema$GoogleCloudAiplatformV1beta1LineageSubgraph { /** - * Number of entities that have been imported by the operation. + * The Artifact nodes in the subgraph. */ - importedEntityCount?: string | null; + artifacts?: Schema$GoogleCloudAiplatformV1beta1Artifact[]; /** - * Number of Feature values that have been imported by the operation. + * The Event edges between Artifacts and Executions in the subgraph. */ - importedFeatureValueCount?: string | null; + events?: Schema$GoogleCloudAiplatformV1beta1Event[]; /** - * The number of rows in input source that weren't imported due to either * Not having any featureValues. * Having a null entityId. * Having a null timestamp. * Not being parsable (applicable for CSV sources). + * The Execution nodes in the subgraph. */ - invalidRowCount?: string | null; + executions?: Schema$GoogleCloudAiplatformV1beta1Execution[]; + } + /** + * Response message for DatasetService.ListAnnotations. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListAnnotationsResponse { /** - * The source URI from where Feature values are imported. + * A list of Annotations that matches the specified filter in the request. 
*/ - sourceUris?: string[] | null; + annotations?: Schema$GoogleCloudAiplatformV1beta1Annotation[]; /** - * The number rows that weren't ingested due to having timestamps outside the retention boundary. + * The standard List next-page token. */ - timestampOutsideRetentionRowsCount?: string | null; + nextPageToken?: string | null; } /** - * Request message for FeaturestoreService.ImportFeatureValues. + * Response message for MetadataService.ListArtifacts. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesRequest { - avroSource?: Schema$GoogleCloudAiplatformV1beta1AvroSource; - bigquerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; - csvSource?: Schema$GoogleCloudAiplatformV1beta1CsvSource; - /** - * If true, API doesn't start ingestion analysis pipeline. - */ - disableIngestionAnalysis?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1ListArtifactsResponse { /** - * If set, data will not be imported for online serving. This is typically used for backfilling, where Feature generation timestamps are not in the timestamp range needed for online serving. + * The Artifacts retrieved from the MetadataStore. */ - disableOnlineServing?: boolean | null; + artifacts?: Schema$GoogleCloudAiplatformV1beta1Artifact[]; /** - * Source column that holds entity IDs. If not provided, entity IDs are extracted from the column named entity_id. + * A token, which can be sent as ListArtifactsRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. */ - entityIdField?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for JobService.ListBatchPredictionJobs + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListBatchPredictionJobsResponse { /** - * Required. Specifications defining which Feature values to import from the entity. The request fails if no feature_specs are provided, and having multiple feature_specs for one Feature is not allowed. 
+ * List of BatchPredictionJobs in the requested page. */ - featureSpecs?: Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesRequestFeatureSpec[]; + batchPredictionJobs?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJob[]; /** - * Single Feature timestamp for all entities being imported. The timestamp must not have higher than millisecond precision. + * A token to retrieve the next page of results. Pass to ListBatchPredictionJobsRequest.page_token to obtain that page. */ - featureTime?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for MetadataService.ListContexts. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListContextsResponse { /** - * Source column that holds the Feature timestamp for all Feature values in each entity. + * The Contexts retrieved from the MetadataStore. */ - featureTimeField?: string | null; + contexts?: Schema$GoogleCloudAiplatformV1beta1Context[]; /** - * Specifies the number of workers that are used to write data to the Featurestore. Consider the online serving capacity that you require to achieve the desired import throughput without interfering with online serving. The value must be positive, and less than or equal to 100. If not set, defaults to using 1 worker. The low count ensures minimal impact on online serving performance. + * A token, which can be sent as ListContextsRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. */ - workerCount?: number | null; + nextPageToken?: string | null; } /** - * Defines the Feature value(s) to import. + * Response message for JobService.ListCustomJobs */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesRequestFeatureSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ListCustomJobsResponse { /** - * Required. ID of the Feature to import values of. This Feature must exist in the target EntityType, or the request will fail. 
+ * List of CustomJobs in the requested page. */ - id?: string | null; + customJobs?: Schema$GoogleCloudAiplatformV1beta1CustomJob[]; /** - * Source column to get the Feature values from. If not set, uses the column with the same name as the Feature ID. + * A token to retrieve the next page of results. Pass to ListCustomJobsRequest.page_token to obtain that page. */ - sourceField?: string | null; + nextPageToken?: string | null; } /** - * Response message for FeaturestoreService.ImportFeatureValues. + * Response message for DatasetService.ListDataItems. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportFeatureValuesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListDataItemsResponse { /** - * Number of entities that have been imported by the operation. + * A list of DataItems that matches the specified filter in the request. */ - importedEntityCount?: string | null; + dataItems?: Schema$GoogleCloudAiplatformV1beta1DataItem[]; /** - * Number of Feature values that have been imported by the operation. + * The standard List next-page token. */ - importedFeatureValueCount?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for JobService.ListDataLabelingJobs. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListDataLabelingJobsResponse { /** - * The number of rows in input source that weren't imported due to either * Not having any featureValues. * Having a null entityId. * Having a null timestamp. * Not being parsable (applicable for CSV sources). + * A list of DataLabelingJobs that matches the specified filter in the request. */ - invalidRowCount?: string | null; + dataLabelingJobs?: Schema$GoogleCloudAiplatformV1beta1DataLabelingJob[]; /** - * The number rows that weren't ingested due to having feature timestamps outside the retention boundary. + * The standard List next-page token. 
*/ - timestampOutsideRetentionRowsCount?: string | null; + nextPageToken?: string | null; } /** - * Request message for ModelService.ImportModelEvaluation + * Response message for DatasetService.ListDatasets. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportModelEvaluationRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ListDatasetsResponse { /** - * Required. Model evaluation resource to be imported. + * A list of Datasets that matches the specified filter in the request. */ - modelEvaluation?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluation; + datasets?: Schema$GoogleCloudAiplatformV1beta1Dataset[]; + /** + * The standard List next-page token. + */ + nextPageToken?: string | null; } /** - * Config for importing RagFiles. + * Response message for DatasetService.ListDatasetVersions. */ - export interface Schema$GoogleCloudAiplatformV1beta1ImportRagFilesConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ListDatasetVersionsResponse { /** - * Google Cloud Storage location. Supports importing individual files as well as entire Google Cloud Storage directories. Sample formats: - `gs://bucket_name/my_directory/object_name/my_file.txt` - `gs://bucket_name/my_directory` - */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; - /** - * Google Drive location. Supports importing individual files as well as Google Drive folders. + * A list of DatasetVersions that matches the specified filter in the request. */ - googleDriveSource?: Schema$GoogleCloudAiplatformV1beta1GoogleDriveSource; + datasetVersions?: Schema$GoogleCloudAiplatformV1beta1DatasetVersion[]; /** - * Specifies the size and overlap of chunks after importing RagFiles. + * The standard List next-page token. */ - ragFileChunkingConfig?: Schema$GoogleCloudAiplatformV1beta1RagFileChunkingConfig; + nextPageToken?: string | null; } /** - * Request message for VertexRagDataService.ImportRagFiles. + * Response message for ListDeploymentResourcePools method. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ImportRagFilesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ListDeploymentResourcePoolsResponse { /** - * Required. The config for the RagFiles to be synced and imported into the RagCorpus. VertexRagDataService.ImportRagFiles. + * The DeploymentResourcePools from the specified location. */ - importRagFilesConfig?: Schema$GoogleCloudAiplatformV1beta1ImportRagFilesConfig; + deploymentResourcePools?: Schema$GoogleCloudAiplatformV1beta1DeploymentResourcePool[]; + /** + * A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. + */ + nextPageToken?: string | null; } /** - * A representation of a collection of database items organized in a way that allows for approximate nearest neighbor (a.k.a ANN) algorithms search. + * Response message for EndpointService.ListEndpoints. */ - export interface Schema$GoogleCloudAiplatformV1beta1Index { - /** - * Output only. Timestamp when this Index was created. - */ - createTime?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ListEndpointsResponse { /** - * Output only. The pointers to DeployedIndexes created from this Index. An Index can be only deleted if all its DeployedIndexes had been undeployed first. + * List of Endpoints in the requested page. */ - deployedIndexes?: Schema$GoogleCloudAiplatformV1beta1DeployedIndexRef[]; + endpoints?: Schema$GoogleCloudAiplatformV1beta1Endpoint[]; /** - * The description of the Index. + * A token to retrieve the next page of results. Pass to ListEndpointsRequest.page_token to obtain that page. */ - description?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for FeaturestoreService.ListEntityTypes. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListEntityTypesResponse { /** - * Required. The display name of the Index. 
The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The EntityTypes matching the request. */ - displayName?: string | null; + entityTypes?: Schema$GoogleCloudAiplatformV1beta1EntityType[]; /** - * Immutable. Customer-managed encryption key spec for an Index. If set, this Index and all sub-resources of this Index will be secured by this key. + * A token, which can be sent as ListEntityTypesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + nextPageToken?: string | null; + } + /** + * Response message for MetadataService.ListExecutions. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListExecutionsResponse { /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The Executions retrieved from the MetadataStore. */ - etag?: string | null; + executions?: Schema$GoogleCloudAiplatformV1beta1Execution[]; /** - * Output only. Stats of the index resource. + * A token, which can be sent as ListExecutionsRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. */ - indexStats?: Schema$GoogleCloudAiplatformV1beta1IndexStats; + nextPageToken?: string | null; + } + /** + * Response message for ExtensionRegistryService.ListExtensions + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListExtensionsResponse { /** - * Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. + * List of Extension in the requested page. */ - indexUpdateMethod?: string | null; + extensions?: Schema$GoogleCloudAiplatformV1beta1Extension[]; /** - * The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. 
International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * A token to retrieve the next page of results. Pass to ListExtensionsRequest.page_token to obtain that page. */ - labels?: {[key: string]: string} | null; + nextPageToken?: string | null; + } + /** + * Response message for FeatureRegistryService.ListFeatureGroups. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureGroupsResponse { /** - * An additional information about the Index; the schema of the metadata can be found in metadata_schema. + * The FeatureGroups matching the request. */ - metadata?: any | null; + featureGroups?: Schema$GoogleCloudAiplatformV1beta1FeatureGroup[]; /** - * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Index, that is specific to it. Unset if the Index does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * A token, which can be sent as ListFeatureGroupsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - metadataSchemaUri?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for FeatureOnlineStoreAdminService.ListFeatureOnlineStores. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureOnlineStoresResponse { /** - * Output only. The resource name of the Index. + * The FeatureOnlineStores matching the request. */ - name?: string | null; + featureOnlineStores?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStore[]; /** - * Output only. Timestamp when this Index was most recently updated. 
This also includes any update to the contents of the Index. Note that Operations working on this Index may have their Operations.metadata.generic_metadata.update_time a little after the value of this timestamp, yet that does not mean their results are not already reflected in the Index. Result of any successfully completed Operation on the Index is reflected in it. + * A token, which can be sent as ListFeatureOnlineStoresRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - updateTime?: string | null; + nextPageToken?: string | null; } /** - * A datapoint of Index. + * Response message for FeaturestoreService.ListFeatures. Response message for FeatureRegistryService.ListFeatures. */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapoint { - /** - * Optional. CrowdingTag of the datapoint, the number of neighbors to return in each crowding can be configured during query. - */ - crowdingTag?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointCrowdingTag; + export interface Schema$GoogleCloudAiplatformV1beta1ListFeaturesResponse { /** - * Required. Unique identifier of the datapoint. + * The Features matching the request. */ - datapointId?: string | null; + features?: Schema$GoogleCloudAiplatformV1beta1Feature[]; /** - * Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + * A token, which can be sent as ListFeaturesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - featureVector?: number[] | null; + nextPageToken?: string | null; + } + /** + * Response message for FeaturestoreService.ListFeaturestores. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListFeaturestoresResponse { /** - * Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. 
This uses numeric comparisons. + * The Featurestores matching the request. */ - numericRestricts?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointNumericRestriction[]; + featurestores?: Schema$GoogleCloudAiplatformV1beta1Featurestore[]; /** - * Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses categorical tokens. See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering + * A token, which can be sent as ListFeaturestoresRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - restricts?: Schema$GoogleCloudAiplatformV1beta1IndexDatapointRestriction[]; + nextPageToken?: string | null; } /** - * Crowding tag is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. + * Response message for FeatureOnlineStoreAdminService.ListFeatureViews. */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointCrowdingTag { + export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureViewsResponse { /** - * The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. + * The FeatureViews matching the request. */ - crowdingAttribute?: string | null; + featureViews?: Schema$GoogleCloudAiplatformV1beta1FeatureView[]; + /** + * A token, which can be sent as ListFeatureViewsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. 
+ */ + nextPageToken?: string | null; } /** - * This field allows restricts to be based on numeric comparisons rather than categorical tokens. + * Response message for FeatureOnlineStoreAdminService.ListFeatureViewSyncs. */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointNumericRestriction { - /** - * The namespace of this restriction. e.g.: cost. - */ - namespace?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureViewSyncsResponse { /** - * This MUST be specified for queries and must NOT be specified for datapoints. + * The FeatureViewSyncs matching the request. */ - op?: string | null; + featureViewSyncs?: Schema$GoogleCloudAiplatformV1beta1FeatureViewSync[]; /** - * Represents 64 bit float. + * A token, which can be sent as ListFeatureViewSyncsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - valueDouble?: number | null; + nextPageToken?: string | null; + } + /** + * Response message for JobService.ListHyperparameterTuningJobs + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListHyperparameterTuningJobsResponse { /** - * Represents 32 bit float. + * List of HyperparameterTuningJobs in the requested page. HyperparameterTuningJob.trials of the jobs will be not be returned. */ - valueFloat?: number | null; + hyperparameterTuningJobs?: Schema$GoogleCloudAiplatformV1beta1HyperparameterTuningJob[]; /** - * Represents 64 bit integer. + * A token to retrieve the next page of results. Pass to ListHyperparameterTuningJobsRequest.page_token to obtain that page. */ - valueInt?: string | null; + nextPageToken?: string | null; } /** - * Restriction of a datapoint which describe its attributes(tokens) from each of several attribute categories(namespaces). + * Response message for IndexEndpointService.ListIndexEndpoints. */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexDatapointRestriction { - /** - * The attributes to allow in this namespace. 
e.g.: 'red' - */ - allowList?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1beta1ListIndexEndpointsResponse { /** - * The attributes to deny in this namespace. e.g.: 'blue' + * List of IndexEndpoints in the requested page. */ - denyList?: string[] | null; + indexEndpoints?: Schema$GoogleCloudAiplatformV1beta1IndexEndpoint[]; /** - * The namespace of this restriction. e.g.: color. + * A token to retrieve next page of results. Pass to ListIndexEndpointsRequest.page_token to obtain that page. */ - namespace?: string | null; + nextPageToken?: string | null; } /** - * Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes. + * Response message for IndexService.ListIndexes. */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexEndpoint { + export interface Schema$GoogleCloudAiplatformV1beta1ListIndexesResponse { /** - * Output only. Timestamp when this IndexEndpoint was created. + * List of indexes in the requested page. */ - createTime?: string | null; + indexes?: Schema$GoogleCloudAiplatformV1beta1Index[]; /** - * Output only. The indexes deployed in this endpoint. + * A token to retrieve next page of results. Pass to ListIndexesRequest.page_token to obtain that page. */ - deployedIndexes?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex[]; + nextPageToken?: string | null; + } + /** + * Response message for MetadataService.ListMetadataSchemas. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListMetadataSchemasResponse { /** - * The description of the IndexEndpoint. + * The MetadataSchemas found for the MetadataStore. */ - description?: string | null; + metadataSchemas?: Schema$GoogleCloudAiplatformV1beta1MetadataSchema[]; /** - * Required. The display name of the IndexEndpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * A token, which can be sent as ListMetadataSchemasRequest.page_token to retrieve the next page. 
If this field is not populated, there are no subsequent pages. */ - displayName?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for MetadataService.ListMetadataStores. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListMetadataStoresResponse { /** - * Optional. Deprecated: If true, expose the IndexEndpoint via private service connect. Only one of the fields, network or enable_private_service_connect, can be set. + * The MetadataStores found for the Location. */ - enablePrivateServiceConnect?: boolean | null; + metadataStores?: Schema$GoogleCloudAiplatformV1beta1MetadataStore[]; /** - * Immutable. Customer-managed encryption key spec for an IndexEndpoint. If set, this IndexEndpoint and all sub-resources of this IndexEndpoint will be secured by this key. + * A token, which can be sent as ListMetadataStoresRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + nextPageToken?: string | null; + } + /** + * Response message for JobService.ListModelDeploymentMonitoringJobs. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListModelDeploymentMonitoringJobsResponse { /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * A list of ModelDeploymentMonitoringJobs that matches the specified filter in the request. */ - etag?: string | null; + modelDeploymentMonitoringJobs?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJob[]; /** - * The labels with user-defined metadata to organize your IndexEndpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The standard List next-page token. 
*/ - labels?: {[key: string]: string} | null; + nextPageToken?: string | null; + } + /** + * Response message for ModelService.ListModelEvaluationSlices. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListModelEvaluationSlicesResponse { /** - * Output only. The resource name of the IndexEndpoint. + * List of ModelEvaluations in the requested page. */ - name?: string | null; + modelEvaluationSlices?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSlice[]; /** - * Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the IndexEndpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. network and private_service_connect_config are mutually exclusive. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in '12345', and {network\} is network name. + * A token to retrieve next page of results. Pass to ListModelEvaluationSlicesRequest.page_token to obtain that page. */ - network?: string | null; - /** - * Optional. Configuration for private service connect. network and private_service_connect_config are mutually exclusive. + nextPageToken?: string | null; + } + /** + * Response message for ModelService.ListModelEvaluations. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListModelEvaluationsResponse { + /** + * List of ModelEvaluations in the requested page. */ - privateServiceConnectConfig?: Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig; + modelEvaluations?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluation[]; /** - * Output only. If public_endpoint_enabled is true, this field will be populated with the domain name to use for this index endpoint. + * A token to retrieve next page of results. 
Pass to ListModelEvaluationsRequest.page_token to obtain that page. */ - publicEndpointDomainName?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for ModelMonitoringService.ListModelMonitoringJobs. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListModelMonitoringJobsResponse { /** - * Optional. If true, the deployed index will be accessible through public endpoint. + * A list of ModelMonitoringJobs that matches the specified filter in the request. */ - publicEndpointEnabled?: boolean | null; + modelMonitoringJobs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJob[]; /** - * Output only. Timestamp when this IndexEndpoint was last updated. This timestamp is not updated when the endpoint's DeployedIndexes are updated, e.g. due to updates of the original Indexes they are the deployments of. + * The standard List next-page token. */ - updateTime?: string | null; + nextPageToken?: string | null; } /** - * IndexPrivateEndpoints proto is used to provide paths for users to send requests via private endpoints (e.g. private service access, private service connect). To send request via private service access, use match_grpc_address. To send request via private service connect, use service_attachment. + * Response message for ModelMonitoringService.ListModelMonitors */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexPrivateEndpoints { - /** - * Output only. The ip address used to send match gRPC requests. - */ - matchGrpcAddress?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ListModelMonitorsResponse { /** - * Output only. PscAutomatedEndpoints is populated if private service connect is enabled if PscAutomatedConfig is set. + * List of ModelMonitor in the requested page. */ - pscAutomatedEndpoints?: Schema$GoogleCloudAiplatformV1beta1PscAutomatedEndpoints[]; + modelMonitors?: Schema$GoogleCloudAiplatformV1beta1ModelMonitor[]; /** - * Output only. The name of the service attachment resource. 
Populated if private service connect is enabled. + * A token to retrieve the next page of results. Pass to ListModelMonitorsRequest.page_token to obtain that page. */ - serviceAttachment?: string | null; + nextPageToken?: string | null; } /** - * Stats of the Index. + * Response message for ModelService.ListModels */ - export interface Schema$GoogleCloudAiplatformV1beta1IndexStats { + export interface Schema$GoogleCloudAiplatformV1beta1ListModelsResponse { /** - * Output only. The number of shards in the Index. + * List of Models in the requested page. */ - shardsCount?: number | null; + models?: Schema$GoogleCloudAiplatformV1beta1Model[]; /** - * Output only. The number of dense vectors in the Index. + * A token to retrieve next page of results. Pass to ListModelsRequest.page_token to obtain that page. */ - vectorsCount?: string | null; + nextPageToken?: string | null; } /** - * Specifies Vertex AI owned input data to be used for training, and possibly evaluating, the Model. + * Response message for ModelService.ListModelVersions */ - export interface Schema$GoogleCloudAiplatformV1beta1InputDataConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ListModelVersionsResponse { /** - * Applicable only to custom training with Datasets that have DataItems and Annotations. Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with metadata of the Dataset specified by dataset_id. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on. 
When used in conjunction with annotations_filter, the Annotations used for training are filtered by both annotations_filter and annotation_schema_uri. + * List of Model versions in the requested page. In the returned Model name field, version ID instead of regvision tag will be included. */ - annotationSchemaUri?: string | null; + models?: Schema$GoogleCloudAiplatformV1beta1Model[]; /** - * Applicable only to Datasets that have DataItems and Annotations. A filter on Annotations of the Dataset. Only Annotations that both match this filter and belong to DataItems not ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on (for the auto-assigned that role is decided by Vertex AI). A filter with same syntax as the one used in ListAnnotations may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. + * A token to retrieve the next page of results. Pass to ListModelVersionsRequest.page_token to obtain that page. */ - annotationsFilter?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for JobService.ListNasJobs + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListNasJobsResponse { /** - * Only applicable to custom training with tabular Dataset with BigQuery source. The BigQuery project location where the training data is to be written to. In the given project a new dataset is created with name `dataset___` where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training input data is written into that dataset. In the dataset three tables are created, `training`, `validation` and `test`. * AIP_DATA_FORMAT = "bigquery". * AIP_TRAINING_DATA_URI = "bigquery_destination.dataset___.training" * AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset___.validation" * AIP_TEST_DATA_URI = "bigquery_destination.dataset___.test" + * List of NasJobs in the requested page. 
NasJob.nas_job_output of the jobs will not be returned. */ - bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; + nasJobs?: Schema$GoogleCloudAiplatformV1beta1NasJob[]; /** - * Required. The ID of the Dataset in the same Project and Location which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's training_task_definition. For tabular Datasets, all their data is exported to training, to pick and choose from. + * A token to retrieve the next page of results. Pass to ListNasJobsRequest.page_token to obtain that page. */ - datasetId?: string | null; + nextPageToken?: string | null; + } + /** + * Response message for JobService.ListNasTrialDetails + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListNasTrialDetailsResponse { /** - * Split based on the provided filters for each set. + * List of top NasTrials in the requested page. */ - filterSplit?: Schema$GoogleCloudAiplatformV1beta1FilterSplit; + nasTrialDetails?: Schema$GoogleCloudAiplatformV1beta1NasTrialDetail[]; /** - * Split based on fractions defining the size of each set. + * A token to retrieve the next page of results. Pass to ListNasTrialDetailsRequest.page_token to obtain that page. */ - fractionSplit?: Schema$GoogleCloudAiplatformV1beta1FractionSplit; + nextPageToken?: string | null; + } + /** + * Response message for [NotebookService.CreateNotebookExecutionJob] + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListNotebookExecutionJobsResponse { /** - * The Cloud Storage location where the training data is to be written to. In the given directory a new directory is created with name: `dataset---` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All training input data is written into that directory. 
The Vertex AI environment variables representing Cloud Storage data URIs are represented in the Cloud Storage wildcard format to support sharded data. e.g.: "gs://.../training-*.jsonl" * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data * AIP_TRAINING_DATA_URI = "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT\}" * AIP_VALIDATION_DATA_URI = "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT\}" * AIP_TEST_DATA_URI = "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT\}" + * A token to retrieve next page of results. Pass to ListNotebookExecutionJobs.page_token to obtain that page. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + nextPageToken?: string | null; /** - * Whether to persist the ML use assignment to data item system labels. + * List of NotebookExecutionJobs in the requested page. */ - persistMlUseAssignment?: boolean | null; + notebookExecutionJobs?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJob[]; + } + /** + * Response message for NotebookService.ListNotebookRuntimes. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListNotebookRuntimesResponse { /** - * Supported only for tabular Datasets. Split based on a predefined key. + * A token to retrieve next page of results. Pass to ListNotebookRuntimesRequest.page_token to obtain that page. */ - predefinedSplit?: Schema$GoogleCloudAiplatformV1beta1PredefinedSplit; + nextPageToken?: string | null; /** - * Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery (annotation set) under the Dataset specified by dataset_id used for filtering Annotations for training. Only Annotations that are associated with this SavedQuery are used in respectively training. When used in conjunction with annotations_filter, the Annotations used for training are filtered by both saved_query_id and annotations_filter. 
Only one of saved_query_id and annotation_schema_uri should be specified as both of them represent the same thing: problem type. + * List of NotebookRuntimes in the requested page. */ - savedQueryId?: string | null; + notebookRuntimes?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntime[]; + } + /** + * Response message for NotebookService.ListNotebookRuntimeTemplates. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListNotebookRuntimeTemplatesResponse { /** - * Supported only for tabular Datasets. Split based on the distribution of the specified column. + * A token to retrieve next page of results. Pass to ListNotebookRuntimeTemplatesRequest.page_token to obtain that page. */ - stratifiedSplit?: Schema$GoogleCloudAiplatformV1beta1StratifiedSplit; + nextPageToken?: string | null; /** - * Supported only for tabular Datasets. Split based on the timestamp of the input data pieces. + * List of NotebookRuntimeTemplates in the requested page. */ - timestampSplit?: Schema$GoogleCloudAiplatformV1beta1TimestampSplit; + notebookRuntimeTemplates?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplate[]; } /** - * A list of int64 values. + * Request message for VizierService.ListOptimalTrials. */ - export interface Schema$GoogleCloudAiplatformV1beta1Int64Array { + export interface Schema$GoogleCloudAiplatformV1beta1ListOptimalTrialsRequest {} + /** + * Response message for VizierService.ListOptimalTrials. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListOptimalTrialsResponse { /** - * A list of int64 values. + * The pareto-optimal Trials for multiple objective Study or the optimal trial for single objective Study. The definition of pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency */ - values?: string[] | null; + optimalTrials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; } /** - * An attribution method that computes the Aumann-Shapley value taking advantage of the model's fully differentiable structure. 
Refer to this paper for more details: https://arxiv.org/abs/1703.01365 + * Response message for PersistentResourceService.ListPersistentResources */ - export interface Schema$GoogleCloudAiplatformV1beta1IntegratedGradientsAttribution { + export interface Schema$GoogleCloudAiplatformV1beta1ListPersistentResourcesResponse { /** - * Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 + * A token to retrieve next page of results. Pass to ListPersistentResourcesRequest.page_token to obtain that page. */ - blurBaselineConfig?: Schema$GoogleCloudAiplatformV1beta1BlurBaselineConfig; + nextPageToken?: string | null; + persistentResources?: Schema$GoogleCloudAiplatformV1beta1PersistentResource[]; + } + /** + * Response message for PipelineService.ListPipelineJobs + */ + export interface Schema$GoogleCloudAiplatformV1beta1ListPipelineJobsResponse { /** - * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + * A token to retrieve the next page of results. Pass to ListPipelineJobsRequest.page_token to obtain that page. */ - smoothGradConfig?: Schema$GoogleCloudAiplatformV1beta1SmoothGradConfig; + nextPageToken?: string | null; /** - * Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is within the desired error range. Valid range of its value is [1, 100], inclusively. + * List of PipelineJobs in the requested page. 
*/ - stepCount?: number | null; + pipelineJobs?: Schema$GoogleCloudAiplatformV1beta1PipelineJob[]; } /** - * Request message for [InternalOsServiceStateInstance]. + * Response message for ModelGardenService.ListPublisherModels. */ - export interface Schema$GoogleCloudAiplatformV1beta1InternalOsServiceStateInstance { + export interface Schema$GoogleCloudAiplatformV1beta1ListPublisherModelsResponse { /** - * Required. internal service name. + * A token to retrieve next page of results. Pass to ListPublisherModels.page_token to obtain that page. */ - serviceName?: string | null; + nextPageToken?: string | null; /** - * Required. internal service state. + * List of PublisherModels in the requested page. */ - serviceState?: string | null; + publisherModels?: Schema$GoogleCloudAiplatformV1beta1PublisherModel[]; } /** - * Contains information about the Large Model. + * Response message for VertexRagDataService.ListRagCorpora. */ - export interface Schema$GoogleCloudAiplatformV1beta1LargeModelReference { + export interface Schema$GoogleCloudAiplatformV1beta1ListRagCorporaResponse { /** - * Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. + * A token to retrieve the next page of results. Pass to ListRagCorporaRequest.page_token to obtain that page. */ - name?: string | null; + nextPageToken?: string | null; + /** + * List of RagCorpora in the requested page. + */ + ragCorpora?: Schema$GoogleCloudAiplatformV1beta1RagCorpus[]; } /** - * A subgraph of the overall lineage graph. Event edges connect Artifact and Execution nodes. + * Response message for VertexRagDataService.ListRagFiles. */ - export interface Schema$GoogleCloudAiplatformV1beta1LineageSubgraph { - /** - * The Artifact nodes in the subgraph. 
- */ - artifacts?: Schema$GoogleCloudAiplatformV1beta1Artifact[]; + export interface Schema$GoogleCloudAiplatformV1beta1ListRagFilesResponse { /** - * The Event edges between Artifacts and Executions in the subgraph. + * A token to retrieve the next page of results. Pass to ListRagFilesRequest.page_token to obtain that page. */ - events?: Schema$GoogleCloudAiplatformV1beta1Event[]; + nextPageToken?: string | null; /** - * The Execution nodes in the subgraph. + * List of RagFiles in the requested page. */ - executions?: Schema$GoogleCloudAiplatformV1beta1Execution[]; + ragFiles?: Schema$GoogleCloudAiplatformV1beta1RagFile[]; } /** - * Response message for DatasetService.ListAnnotations. + * Response message for ReasoningEngineService.ListReasoningEngines */ - export interface Schema$GoogleCloudAiplatformV1beta1ListAnnotationsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse { /** - * A list of Annotations that matches the specified filter in the request. + * A token to retrieve the next page of results. Pass to ListReasoningEnginesRequest.page_token to obtain that page. */ - annotations?: Schema$GoogleCloudAiplatformV1beta1Annotation[]; + nextPageToken?: string | null; /** - * The standard List next-page token. + * List of ReasoningEngines in the requested page. */ - nextPageToken?: string | null; + reasoningEngines?: Schema$GoogleCloudAiplatformV1beta1ReasoningEngine[]; } /** - * Response message for MetadataService.ListArtifacts. + * Response message for DatasetService.ListSavedQueries. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListArtifactsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListSavedQueriesResponse { /** - * The Artifacts retrieved from the MetadataStore. + * The standard List next-page token. 
*/ - artifacts?: Schema$GoogleCloudAiplatformV1beta1Artifact[]; + nextPageToken?: string | null; /** - * A token, which can be sent as ListArtifactsRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. + * A list of SavedQueries that match the specified filter in the request. */ - nextPageToken?: string | null; + savedQueries?: Schema$GoogleCloudAiplatformV1beta1SavedQuery[]; } /** - * Response message for JobService.ListBatchPredictionJobs + * Response message for ScheduleService.ListSchedules */ - export interface Schema$GoogleCloudAiplatformV1beta1ListBatchPredictionJobsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListSchedulesResponse { /** - * List of BatchPredictionJobs in the requested page. + * A token to retrieve the next page of results. Pass to ListSchedulesRequest.page_token to obtain that page. */ - batchPredictionJobs?: Schema$GoogleCloudAiplatformV1beta1BatchPredictionJob[]; + nextPageToken?: string | null; /** - * A token to retrieve the next page of results. Pass to ListBatchPredictionJobsRequest.page_token to obtain that page. + * List of Schedules in the requested page. */ - nextPageToken?: string | null; + schedules?: Schema$GoogleCloudAiplatformV1beta1Schedule[]; } /** - * Response message for MetadataService.ListContexts. + * Response message for SpecialistPoolService.ListSpecialistPools. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListContextsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListSpecialistPoolsResponse { /** - * The Contexts retrieved from the MetadataStore. + * The standard List next-page token. */ - contexts?: Schema$GoogleCloudAiplatformV1beta1Context[]; + nextPageToken?: string | null; /** - * A token, which can be sent as ListContextsRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. + * A list of SpecialistPools that matches the specified filter in the request. 
*/ - nextPageToken?: string | null; + specialistPools?: Schema$GoogleCloudAiplatformV1beta1SpecialistPool[]; } /** - * Response message for JobService.ListCustomJobs + * Response message for VizierService.ListStudies. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListCustomJobsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListStudiesResponse { /** - * List of CustomJobs in the requested page. + * Passes this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages. */ - customJobs?: Schema$GoogleCloudAiplatformV1beta1CustomJob[]; + nextPageToken?: string | null; /** - * A token to retrieve the next page of results. Pass to ListCustomJobsRequest.page_token to obtain that page. + * The studies associated with the project. */ - nextPageToken?: string | null; + studies?: Schema$GoogleCloudAiplatformV1beta1Study[]; } /** - * Response message for DatasetService.ListDataItems. + * Response message for TensorboardService.ListTensorboardExperiments. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListDataItemsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardExperimentsResponse { /** - * A list of DataItems that matches the specified filter in the request. + * A token, which can be sent as ListTensorboardExperimentsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - dataItems?: Schema$GoogleCloudAiplatformV1beta1DataItem[]; + nextPageToken?: string | null; /** - * The standard List next-page token. + * The TensorboardExperiments mathching the request. */ - nextPageToken?: string | null; + tensorboardExperiments?: Schema$GoogleCloudAiplatformV1beta1TensorboardExperiment[]; } /** - * Response message for JobService.ListDataLabelingJobs. + * Response message for TensorboardService.ListTensorboardRuns. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ListDataLabelingJobsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardRunsResponse { /** - * A list of DataLabelingJobs that matches the specified filter in the request. + * A token, which can be sent as ListTensorboardRunsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - dataLabelingJobs?: Schema$GoogleCloudAiplatformV1beta1DataLabelingJob[]; + nextPageToken?: string | null; /** - * The standard List next-page token. + * The TensorboardRuns mathching the request. */ - nextPageToken?: string | null; + tensorboardRuns?: Schema$GoogleCloudAiplatformV1beta1TensorboardRun[]; } /** - * Response message for DatasetService.ListDatasets. + * Response message for TensorboardService.ListTensorboards. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListDatasetsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardsResponse { /** - * A list of Datasets that matches the specified filter in the request. + * A token, which can be sent as ListTensorboardsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - datasets?: Schema$GoogleCloudAiplatformV1beta1Dataset[]; + nextPageToken?: string | null; /** - * The standard List next-page token. + * The Tensorboards mathching the request. */ - nextPageToken?: string | null; + tensorboards?: Schema$GoogleCloudAiplatformV1beta1Tensorboard[]; } /** - * Response message for DatasetService.ListDatasetVersions. + * Response message for TensorboardService.ListTensorboardTimeSeries. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListDatasetVersionsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardTimeSeriesResponse { /** - * A list of DatasetVersions that matches the specified filter in the request. 
+ * A token, which can be sent as ListTensorboardTimeSeriesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - datasetVersions?: Schema$GoogleCloudAiplatformV1beta1DatasetVersion[]; + nextPageToken?: string | null; /** - * The standard List next-page token. + * The TensorboardTimeSeries mathching the request. */ - nextPageToken?: string | null; + tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries[]; } /** - * Response message for ListDeploymentResourcePools method. + * Response message for PipelineService.ListTrainingPipelines */ - export interface Schema$GoogleCloudAiplatformV1beta1ListDeploymentResourcePoolsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTrainingPipelinesResponse { /** - * The DeploymentResourcePools from the specified location. + * A token to retrieve the next page of results. Pass to ListTrainingPipelinesRequest.page_token to obtain that page. */ - deploymentResourcePools?: Schema$GoogleCloudAiplatformV1beta1DeploymentResourcePool[]; + nextPageToken?: string | null; /** - * A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. + * List of TrainingPipelines in the requested page. */ - nextPageToken?: string | null; + trainingPipelines?: Schema$GoogleCloudAiplatformV1beta1TrainingPipeline[]; } /** - * Response message for EndpointService.ListEndpoints. + * Response message for VizierService.ListTrials. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListEndpointsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTrialsResponse { /** - * List of Endpoints in the requested page. + * Pass this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages. 
*/ - endpoints?: Schema$GoogleCloudAiplatformV1beta1Endpoint[]; + nextPageToken?: string | null; /** - * A token to retrieve the next page of results. Pass to ListEndpointsRequest.page_token to obtain that page. + * The Trials associated with the Study. */ - nextPageToken?: string | null; + trials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; } /** - * Response message for FeaturestoreService.ListEntityTypes. + * Response message for GenAiTuningService.ListTuningJobs */ - export interface Schema$GoogleCloudAiplatformV1beta1ListEntityTypesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ListTuningJobsResponse { /** - * The EntityTypes matching the request. + * A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page. */ - entityTypes?: Schema$GoogleCloudAiplatformV1beta1EntityType[]; + nextPageToken?: string | null; /** - * A token, which can be sent as ListEntityTypesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * List of TuningJobs in the requested page. */ - nextPageToken?: string | null; + tuningJobs?: Schema$GoogleCloudAiplatformV1beta1TuningJob[]; } /** - * Response message for MetadataService.ListExecutions. + * Request message for VizierService.LookupStudy. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListExecutionsResponse { - /** - * The Executions retrieved from the MetadataStore. - */ - executions?: Schema$GoogleCloudAiplatformV1beta1Execution[]; + export interface Schema$GoogleCloudAiplatformV1beta1LookupStudyRequest { /** - * A token, which can be sent as ListExecutionsRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. + * Required. The user-defined display name of the Study */ - nextPageToken?: string | null; + displayName?: string | null; } /** - * Response message for ExtensionRegistryService.ListExtensions + * Specification of a single machine. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ListExtensionsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MachineSpec { /** - * List of Extension in the requested page. + * The number of accelerators to attach to the machine. */ - extensions?: Schema$GoogleCloudAiplatformV1beta1Extension[]; + acceleratorCount?: number | null; /** - * A token to retrieve the next page of results. Pass to ListExtensionsRequest.page_token to obtain that page. + * Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. */ - nextPageToken?: string | null; - } - /** - * Response message for FeatureRegistryService.ListFeatureGroups. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureGroupsResponse { + acceleratorType?: string | null; /** - * The FeatureGroups matching the request. + * Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. */ - featureGroups?: Schema$GoogleCloudAiplatformV1beta1FeatureGroup[]; + machineType?: string | null; /** - * A token, which can be sent as ListFeatureGroupsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). */ - nextPageToken?: string | null; + tpuTopology?: string | null; } /** - * Response message for FeatureOnlineStoreAdminService.ListFeatureOnlineStores. + * Manual batch tuning parameters. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureOnlineStoresResponse { - /** - * The FeatureOnlineStores matching the request. - */ - featureOnlineStores?: Schema$GoogleCloudAiplatformV1beta1FeatureOnlineStore[]; + export interface Schema$GoogleCloudAiplatformV1beta1ManualBatchTuningParameters { /** - * A token, which can be sent as ListFeatureOnlineStoresRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Immutable. The number of the records (e.g. instances) of the operation given in each batch to a machine replica. Machine type, and size of a single record should be considered when setting this parameter, higher value speeds up the batch operation's execution, but too high value will result in a whole batch not fitting in a machine's memory, and the whole operation will fail. The default value is 64. */ - nextPageToken?: string | null; + batchSize?: number | null; } /** - * Response message for FeaturestoreService.ListFeatures. Response message for FeatureRegistryService.ListFeatures. + * A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListFeaturesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1Measurement { /** - * The Features matching the request. + * Output only. Time that the Trial has been running at the point of this Measurement. */ - features?: Schema$GoogleCloudAiplatformV1beta1Feature[]; + elapsedDuration?: string | null; /** - * A token, which can be sent as ListFeaturesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. */ - nextPageToken?: string | null; + metrics?: Schema$GoogleCloudAiplatformV1beta1MeasurementMetric[]; + /** + * Output only. 
The number of steps the machine learning model has been trained for. Must be non-negative. + */ + stepCount?: string | null; } /** - * Response message for FeaturestoreService.ListFeaturestores. + * A message representing a metric in the measurement. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListFeaturestoresResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MeasurementMetric { /** - * The Featurestores matching the request. + * Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. */ - featurestores?: Schema$GoogleCloudAiplatformV1beta1Featurestore[]; + metricId?: string | null; /** - * A token, which can be sent as ListFeaturestoresRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Output only. The value for this metric. */ - nextPageToken?: string | null; + value?: number | null; } /** - * Response message for FeatureOnlineStoreAdminService.ListFeatureViews. + * Request message for ModelService.MergeVersionAliases. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureViewsResponse { - /** - * The FeatureViews matching the request. - */ - featureViews?: Schema$GoogleCloudAiplatformV1beta1FeatureView[]; + export interface Schema$GoogleCloudAiplatformV1beta1MergeVersionAliasesRequest { /** - * A token, which can be sent as ListFeatureViewsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Required. The set of version aliases to merge. The alias should be at most 128 characters, and match `a-z{0,126\}[a-z-0-9]`. Add the `-` prefix to an alias means removing that alias from the version. `-` is NOT counted in the 128 characters. Example: `-golden` means removing the `golden` alias from the version. There is NO ordering in aliases, which means 1) The aliases returned from GetModel API might not have the exactly same order from this MergeVersionAliases API. 
2) Adding and deleting the same alias in the request is not recommended, and the 2 operations will be cancelled out. */ - nextPageToken?: string | null; + versionAliases?: string[] | null; } /** - * Response message for FeatureOnlineStoreAdminService.ListFeatureViewSyncs. + * Instance of a general MetadataSchema. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListFeatureViewSyncsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MetadataSchema { /** - * The FeatureViewSyncs matching the request. + * Output only. Timestamp when this MetadataSchema was created. */ - featureViewSyncs?: Schema$GoogleCloudAiplatformV1beta1FeatureViewSync[]; + createTime?: string | null; /** - * A token, which can be sent as ListFeatureViewSyncsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Description of the Metadata Schema */ - nextPageToken?: string | null; - } - /** - * Response message for JobService.ListHyperparameterTuningJobs - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListHyperparameterTuningJobsResponse { + description?: string | null; /** - * List of HyperparameterTuningJobs in the requested page. HyperparameterTuningJob.trials of the jobs will be not be returned. + * Output only. The resource name of the MetadataSchema. */ - hyperparameterTuningJobs?: Schema$GoogleCloudAiplatformV1beta1HyperparameterTuningJob[]; + name?: string | null; /** - * A token to retrieve the next page of results. Pass to ListHyperparameterTuningJobsRequest.page_token to obtain that page. + * Required. The raw YAML string representation of the MetadataSchema. The combination of [MetadataSchema.version] and the schema name given by `title` in [MetadataSchema.schema] must be unique within a MetadataStore. 
The schema is defined as an OpenAPI 3.0.2 [MetadataSchema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject) */ - nextPageToken?: string | null; - } - /** - * Response message for IndexEndpointService.ListIndexEndpoints. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListIndexEndpointsResponse { + schema?: string | null; /** - * List of IndexEndpoints in the requested page. + * The type of the MetadataSchema. This is a property that identifies which metadata types will use the MetadataSchema. */ - indexEndpoints?: Schema$GoogleCloudAiplatformV1beta1IndexEndpoint[]; + schemaType?: string | null; /** - * A token to retrieve next page of results. Pass to ListIndexEndpointsRequest.page_token to obtain that page. + * The version of the MetadataSchema. The version's format must match the following regular expression: `^[0-9]+.+.+$`, which would allow to order/compare different versions. Example: 1.0.0, 1.0.1, etc. */ - nextPageToken?: string | null; + schemaVersion?: string | null; } /** - * Response message for IndexService.ListIndexes. + * Instance of a metadata store. Contains a set of metadata that can be queried. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListIndexesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MetadataStore { /** - * List of indexes in the requested page. + * Output only. Timestamp when this MetadataStore was created. */ - indexes?: Schema$GoogleCloudAiplatformV1beta1Index[]; + createTime?: string | null; /** - * A token to retrieve next page of results. Pass to ListIndexesRequest.page_token to obtain that page. + * Description of the MetadataStore. */ - nextPageToken?: string | null; - } - /** - * Response message for MetadataService.ListMetadataSchemas. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListMetadataSchemasResponse { + description?: string | null; /** - * The MetadataSchemas found for the MetadataStore. 
+ * Customer-managed encryption key spec for a Metadata Store. If set, this Metadata Store and all sub-resources of this Metadata Store are secured using this key. */ - metadataSchemas?: Schema$GoogleCloudAiplatformV1beta1MetadataSchema[]; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * A token, which can be sent as ListMetadataSchemasRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. + * Output only. The resource name of the MetadataStore instance. */ - nextPageToken?: string | null; - } - /** - * Response message for MetadataService.ListMetadataStores. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListMetadataStoresResponse { + name?: string | null; /** - * The MetadataStores found for the Location. + * Output only. State information of the MetadataStore. */ - metadataStores?: Schema$GoogleCloudAiplatformV1beta1MetadataStore[]; + state?: Schema$GoogleCloudAiplatformV1beta1MetadataStoreMetadataStoreState; /** - * A token, which can be sent as ListMetadataStoresRequest.page_token to retrieve the next page. If this field is not populated, there are no subsequent pages. + * Output only. Timestamp when this MetadataStore was last updated. */ - nextPageToken?: string | null; + updateTime?: string | null; } /** - * Response message for JobService.ListModelDeploymentMonitoringJobs. + * Represents state information for a MetadataStore. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelDeploymentMonitoringJobsResponse { - /** - * A list of ModelDeploymentMonitoringJobs that matches the specified filter in the request. - */ - modelDeploymentMonitoringJobs?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJob[]; + export interface Schema$GoogleCloudAiplatformV1beta1MetadataStoreMetadataStoreState { /** - * The standard List next-page token. + * The disk utilization of the MetadataStore in bytes. 
*/ - nextPageToken?: string | null; + diskUtilizationBytes?: string | null; } /** - * Response message for ModelService.ListModelEvaluationSlices. + * Represents one resource that exists in automl.googleapis.com, datalabeling.googleapis.com or ml.googleapis.com. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelEvaluationSlicesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigratableResource { /** - * List of ModelEvaluations in the requested page. + * Output only. Represents one Dataset in automl.googleapis.com. */ - modelEvaluationSlices?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSlice[]; + automlDataset?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlDataset; /** - * A token to retrieve next page of results. Pass to ListModelEvaluationSlicesRequest.page_token to obtain that page. + * Output only. Represents one Model in automl.googleapis.com. */ - nextPageToken?: string | null; - } - /** - * Response message for ModelService.ListModelEvaluations. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelEvaluationsResponse { + automlModel?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlModel; /** - * List of ModelEvaluations in the requested page. + * Output only. Represents one Dataset in datalabeling.googleapis.com. */ - modelEvaluations?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluation[]; + dataLabelingDataset?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDataset; /** - * A token to retrieve next page of results. Pass to ListModelEvaluationsRequest.page_token to obtain that page. + * Output only. Timestamp when the last migration attempt on this MigratableResource started. Will not be set if there's no migration attempt on this MigratableResource. */ - nextPageToken?: string | null; - } - /** - * Response message for ModelMonitoringService.ListModelMonitoringJobs. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelMonitoringJobsResponse { + lastMigrateTime?: string | null; /** - * A list of ModelMonitoringJobs that matches the specified filter in the request. + * Output only. Timestamp when this MigratableResource was last updated. */ - modelMonitoringJobs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJob[]; + lastUpdateTime?: string | null; /** - * The standard List next-page token. + * Output only. Represents one Version in ml.googleapis.com. */ - nextPageToken?: string | null; + mlEngineModelVersion?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceMlEngineModelVersion; } /** - * Response message for ModelMonitoringService.ListModelMonitors + * Represents one Dataset in automl.googleapis.com. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelMonitorsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlDataset { /** - * List of ModelMonitor in the requested page. + * Full resource name of automl Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}`. */ - modelMonitors?: Schema$GoogleCloudAiplatformV1beta1ModelMonitor[]; + dataset?: string | null; /** - * A token to retrieve the next page of results. Pass to ListModelMonitorsRequest.page_token to obtain that page. + * The Dataset's display name in automl.googleapis.com. */ - nextPageToken?: string | null; + datasetDisplayName?: string | null; } /** - * Response message for ModelService.ListModels + * Represents one Model in automl.googleapis.com. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlModel { /** - * List of Models in the requested page. + * Full resource name of automl Model. Format: `projects/{project\}/locations/{location\}/models/{model\}`. 
*/ - models?: Schema$GoogleCloudAiplatformV1beta1Model[]; + model?: string | null; /** - * A token to retrieve next page of results. Pass to ListModelsRequest.page_token to obtain that page. + * The Model's display name in automl.googleapis.com. */ - nextPageToken?: string | null; + modelDisplayName?: string | null; } /** - * Response message for ModelService.ListModelVersions + * Represents one Dataset in datalabeling.googleapis.com. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListModelVersionsResponse { - /** - * List of Model versions in the requested page. In the returned Model name field, version ID instead of regvision tag will be included. - */ - models?: Schema$GoogleCloudAiplatformV1beta1Model[]; + export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDataset { /** - * A token to retrieve the next page of results. Pass to ListModelVersionsRequest.page_token to obtain that page. + * The migratable AnnotatedDataset in datalabeling.googleapis.com belongs to the data labeling Dataset. */ - nextPageToken?: string | null; - } - /** - * Response message for JobService.ListNasJobs - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListNasJobsResponse { + dataLabelingAnnotatedDatasets?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset[]; /** - * List of NasJobs in the requested page. NasJob.nas_job_output of the jobs will not be returned. + * Full resource name of data labeling Dataset. Format: `projects/{project\}/datasets/{dataset\}`. */ - nasJobs?: Schema$GoogleCloudAiplatformV1beta1NasJob[]; + dataset?: string | null; /** - * A token to retrieve the next page of results. Pass to ListNasJobsRequest.page_token to obtain that page. + * The Dataset's display name in datalabeling.googleapis.com. 
*/ - nextPageToken?: string | null; + datasetDisplayName?: string | null; } /** - * Response message for JobService.ListNasTrialDetails + * Represents one AnnotatedDataset in datalabeling.googleapis.com. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListNasTrialDetailsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset { /** - * List of top NasTrials in the requested page. + * Full resource name of data labeling AnnotatedDataset. Format: `projects/{project\}/datasets/{dataset\}/annotatedDatasets/{annotated_dataset\}`. */ - nasTrialDetails?: Schema$GoogleCloudAiplatformV1beta1NasTrialDetail[]; + annotatedDataset?: string | null; /** - * A token to retrieve the next page of results. Pass to ListNasTrialDetailsRequest.page_token to obtain that page. + * The AnnotatedDataset's display name in datalabeling.googleapis.com. */ - nextPageToken?: string | null; + annotatedDatasetDisplayName?: string | null; } /** - * Response message for [NotebookService.CreateNotebookExecutionJob] + * Represents one model Version in ml.googleapis.com. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListNotebookExecutionJobsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceMlEngineModelVersion { /** - * A token to retrieve next page of results. Pass to ListNotebookExecutionJobs.page_token to obtain that page. + * The ml.googleapis.com endpoint that this model Version currently lives in. Example values: * ml.googleapis.com * us-centrall-ml.googleapis.com * europe-west4-ml.googleapis.com * asia-east1-ml.googleapis.com */ - nextPageToken?: string | null; + endpoint?: string | null; /** - * List of NotebookExecutionJobs in the requested page. + * Full resource name of ml engine model Version. Format: `projects/{project\}/models/{model\}/versions/{version\}`. 
*/ - notebookExecutionJobs?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJob[]; + version?: string | null; } /** - * Response message for NotebookService.ListNotebookRuntimes. + * Config of migrating one resource from automl.googleapis.com, datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListNotebookRuntimesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequest { /** - * A token to retrieve next page of results. Pass to ListNotebookRuntimesRequest.page_token to obtain that page. + * Config for migrating Dataset in automl.googleapis.com to Vertex AI's Dataset. */ - nextPageToken?: string | null; + migrateAutomlDatasetConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlDatasetConfig; /** - * List of NotebookRuntimes in the requested page. + * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. */ - notebookRuntimes?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntime[]; - } - /** - * Response message for NotebookService.ListNotebookRuntimeTemplates. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListNotebookRuntimeTemplatesResponse { + migrateAutomlModelConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlModelConfig; /** - * A token to retrieve next page of results. Pass to ListNotebookRuntimeTemplatesRequest.page_token to obtain that page. + * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's Dataset. */ - nextPageToken?: string | null; + migrateDataLabelingDatasetConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfig; /** - * List of NotebookRuntimeTemplates in the requested page. + * Config for migrating Version in ml.googleapis.com to Vertex AI's Model. 
*/ - notebookRuntimeTemplates?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplate[]; + migrateMlEngineModelVersionConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateMlEngineModelVersionConfig; } /** - * Request message for VizierService.ListOptimalTrials. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListOptimalTrialsRequest {} - /** - * Response message for VizierService.ListOptimalTrials. + * Config for migrating Dataset in automl.googleapis.com to Vertex AI's Dataset. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListOptimalTrialsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlDatasetConfig { /** - * The pareto-optimal Trials for multiple objective Study or the optimal trial for single objective Study. The definition of pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency + * Required. Full resource name of automl Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}`. */ - optimalTrials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; - } - /** - * Response message for PersistentResourceService.ListPersistentResources - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListPersistentResourcesResponse { + dataset?: string | null; /** - * A token to retrieve next page of results. Pass to ListPersistentResourcesRequest.page_token to obtain that page. + * Required. Display name of the Dataset in Vertex AI. System will pick a display name if unspecified. */ - nextPageToken?: string | null; - persistentResources?: Schema$GoogleCloudAiplatformV1beta1PersistentResource[]; + datasetDisplayName?: string | null; } /** - * Response message for PipelineService.ListPipelineJobs + * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ListPipelineJobsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlModelConfig { /** - * A token to retrieve the next page of results. Pass to ListPipelineJobsRequest.page_token to obtain that page. + * Required. Full resource name of automl Model. Format: `projects/{project\}/locations/{location\}/models/{model\}`. */ - nextPageToken?: string | null; + model?: string | null; /** - * List of PipelineJobs in the requested page. + * Optional. Display name of the model in Vertex AI. System will pick a display name if unspecified. */ - pipelineJobs?: Schema$GoogleCloudAiplatformV1beta1PipelineJob[]; + modelDisplayName?: string | null; } /** - * Response message for ModelGardenService.ListPublisherModels. + * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's Dataset. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListPublisherModelsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfig { /** - * A token to retrieve next page of results. Pass to ListPublisherModels.page_token to obtain that page. + * Required. Full resource name of data labeling Dataset. Format: `projects/{project\}/datasets/{dataset\}`. */ - nextPageToken?: string | null; + dataset?: string | null; /** - * List of PublisherModels in the requested page. + * Optional. Display name of the Dataset in Vertex AI. System will pick a display name if unspecified. */ - publisherModels?: Schema$GoogleCloudAiplatformV1beta1PublisherModel[]; + datasetDisplayName?: string | null; + /** + * Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com to Vertex AI's SavedQuery. The specified AnnotatedDatasets have to belong to the datalabeling Dataset. 
+ */ + migrateDataLabelingAnnotatedDatasetConfigs?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig[]; } /** - * Response message for VertexRagDataService.ListRagCorpora. + * Config for migrating AnnotatedDataset in datalabeling.googleapis.com to Vertex AI's SavedQuery. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListRagCorporaResponse { - /** - * A token to retrieve the next page of results. Pass to ListRagCorporaRequest.page_token to obtain that page. - */ - nextPageToken?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig { /** - * List of RagCorpora in the requested page. + * Required. Full resource name of data labeling AnnotatedDataset. Format: `projects/{project\}/datasets/{dataset\}/annotatedDatasets/{annotated_dataset\}`. */ - ragCorpora?: Schema$GoogleCloudAiplatformV1beta1RagCorpus[]; + annotatedDataset?: string | null; } /** - * Response message for VertexRagDataService.ListRagFiles. + * Config for migrating version in ml.googleapis.com to Vertex AI's Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListRagFilesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateMlEngineModelVersionConfig { /** - * A token to retrieve the next page of results. Pass to ListRagFilesRequest.page_token to obtain that page. + * Required. The ml.googleapis.com endpoint that this model version should be migrated from. Example values: * ml.googleapis.com * us-centrall-ml.googleapis.com * europe-west4-ml.googleapis.com * asia-east1-ml.googleapis.com */ - nextPageToken?: string | null; + endpoint?: string | null; /** - * List of RagFiles in the requested page. + * Required. Display name of the model in Vertex AI. System will pick a display name if unspecified. 
*/ - ragFiles?: Schema$GoogleCloudAiplatformV1beta1RagFile[]; + modelDisplayName?: string | null; + /** + * Required. Full resource name of ml engine model version. Format: `projects/{project\}/models/{model\}/versions/{version\}`. + */ + modelVersion?: string | null; } /** - * Response message for ReasoningEngineService.ListReasoningEngines + * Describes a successfully migrated resource. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceResponse { /** - * A token to retrieve the next page of results. Pass to ListReasoningEnginesRequest.page_token to obtain that page. + * Migrated Dataset's resource name. */ - nextPageToken?: string | null; + dataset?: string | null; /** - * List of ReasoningEngines in the requested page. + * Before migration, the identifier in ml.googleapis.com, automl.googleapis.com or datalabeling.googleapis.com. */ - reasoningEngines?: Schema$GoogleCloudAiplatformV1beta1ReasoningEngine[]; + migratableResource?: Schema$GoogleCloudAiplatformV1beta1MigratableResource; + /** + * Migrated Model's resource name. + */ + model?: string | null; } /** - * Response message for DatasetService.ListSavedQueries. + * A trained machine learning Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1ListSavedQueriesResponse { + export interface Schema$GoogleCloudAiplatformV1beta1Model { /** - * The standard List next-page token. + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not required for AutoML Models. */ - nextPageToken?: string | null; + artifactUri?: string | null; /** - * A list of SavedQueries that match the specified filter in the request. + * Optional. User input field to specify the base model source. Currently it only supports specifing the Model Garden models and Genie models. 
*/ - savedQueries?: Schema$GoogleCloudAiplatformV1beta1SavedQuery[]; - } - /** - * Response message for ScheduleService.ListSchedules - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListSchedulesResponse { + baseModelSource?: Schema$GoogleCloudAiplatformV1beta1ModelBaseModelSource; /** - * A token to retrieve the next page of results. Pass to ListSchedulesRequest.page_token to obtain that page. + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not required for AutoML Models. */ - nextPageToken?: string | null; + containerSpec?: Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec; /** - * List of Schedules in the requested page. + * Output only. Timestamp when this Model was uploaded into Vertex AI. */ - schedules?: Schema$GoogleCloudAiplatformV1beta1Schedule[]; - } - /** - * Response message for SpecialistPoolService.ListSpecialistPools. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListSpecialistPoolsResponse { + createTime?: string | null; /** - * The standard List next-page token. + * Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. */ - nextPageToken?: string | null; + deployedModels?: Schema$GoogleCloudAiplatformV1beta1DeployedModelRef[]; /** - * A list of SpecialistPools that matches the specified filter in the request. + * The description of the Model. */ - specialistPools?: Schema$GoogleCloudAiplatformV1beta1SpecialistPool[]; - } - /** - * Response message for VizierService.ListStudies. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListStudiesResponse { + description?: string | null; /** - * Passes this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages. + * Required. 
The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - nextPageToken?: string | null; + displayName?: string | null; /** - * The studies associated with the project. + * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. */ - studies?: Schema$GoogleCloudAiplatformV1beta1Study[]; - } - /** - * Response message for TensorboardService.ListTensorboardExperiments. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardExperimentsResponse { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * A token, which can be sent as ListTensorboardExperimentsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - nextPageToken?: string | null; + etag?: string | null; /** - * The TensorboardExperiments mathching the request. + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. */ - tensorboardExperiments?: Schema$GoogleCloudAiplatformV1beta1TensorboardExperiment[]; - } - /** - * Response message for TensorboardService.ListTensorboardRuns. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardRunsResponse { + explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; /** - * A token, which can be sent as ListTensorboardRunsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - nextPageToken?: string | null; + labels?: {[key: string]: string} | null; /** - * The TensorboardRuns mathching the request. + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. */ - tensorboardRuns?: Schema$GoogleCloudAiplatformV1beta1TensorboardRun[]; - } - /** - * Response message for TensorboardService.ListTensorboards. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardsResponse { + metadata?: any | null; /** - * A token, which can be sent as ListTensorboardsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Output only. The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project\}/locations/{location\}/metadataStores/{metadata_store\}/artifacts/{artifact\}`. */ - nextPageToken?: string | null; + metadataArtifact?: string | null; /** - * The Tensorboards mathching the request. + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. 
The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - tensorboards?: Schema$GoogleCloudAiplatformV1beta1Tensorboard[]; - } - /** - * Response message for TensorboardService.ListTensorboardTimeSeries. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTensorboardTimeSeriesResponse { + metadataSchemaUri?: string | null; /** - * A token, which can be sent as ListTensorboardTimeSeriesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Output only. Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or saved and tuned from Genie or Model Garden. */ - nextPageToken?: string | null; + modelSourceInfo?: Schema$GoogleCloudAiplatformV1beta1ModelSourceInfo; /** - * The TensorboardTimeSeries mathching the request. + * The resource name of the Model. */ - tensorboardTimeSeries?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries[]; - } - /** - * Response message for PipelineService.ListTrainingPipelines - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTrainingPipelinesResponse { + name?: string | null; /** - * A token to retrieve the next page of results. Pass to ListTrainingPipelinesRequest.page_token to obtain that page. + * Output only. If this Model is a copy of another Model, this contains info about the original. */ - nextPageToken?: string | null; + originalModelInfo?: Schema$GoogleCloudAiplatformV1beta1ModelOriginalModelInfo; /** - * List of TrainingPipelines in the requested page. 
+ * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. */ - trainingPipelines?: Schema$GoogleCloudAiplatformV1beta1TrainingPipeline[]; - } - /** - * Response message for VizierService.ListTrials. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTrialsResponse { + predictSchemata?: Schema$GoogleCloudAiplatformV1beta1PredictSchemata; /** - * Pass this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages. + * Output only. When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. */ - nextPageToken?: string | null; + supportedDeploymentResourcesTypes?: string[] | null; /** - * The Trials associated with the Study. + * Output only. The formats in which this Model may be exported. If empty, this Model is not available for export. */ - trials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; - } - /** - * Response message for GenAiTuningService.ListTuningJobs - */ - export interface Schema$GoogleCloudAiplatformV1beta1ListTuningJobsResponse { + supportedExportFormats?: Schema$GoogleCloudAiplatformV1beta1ModelExportFormat[]; /** - * A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page. + * Output only. The formats this Model supports in BatchPredictionJob.input_config. 
If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. */ - nextPageToken?: string | null; + supportedInputStorageFormats?: string[] | null; /** - * List of TuningJobs in the requested page. + * Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . 
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. */ - tuningJobs?: Schema$GoogleCloudAiplatformV1beta1TuningJob[]; - } - /** - * Request message for VizierService.LookupStudy. - */ - export interface Schema$GoogleCloudAiplatformV1beta1LookupStudyRequest { + supportedOutputStorageFormats?: string[] | null; /** - * Required. The user-defined display name of the Study + * Output only. The resource name of the TrainingPipeline that uploaded this Model, if any. */ - displayName?: string | null; - } - /** - * Specification of a single machine. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MachineSpec { + trainingPipeline?: string | null; /** - * The number of accelerators to attach to the machine. + * Output only. Timestamp when this Model was most recently updated. */ - acceleratorCount?: number | null; + updateTime?: string | null; /** - * Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project\}/locations/{location\}/models/{model_id\}@{version_alias\}` instead of auto-generated version id (i.e. `projects/{project\}/locations/{location\}/models/{model_id\}@{version_id\})`. The format is a-z{0,126\}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. */ - acceleratorType?: string | null; + versionAliases?: string[] | null; /** - * Immutable. The type of the machine. 
See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + * Output only. Timestamp when this version was created. */ - machineType?: string | null; + versionCreateTime?: string | null; /** - * Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). + * The description of this version. */ - tpuTopology?: string | null; + versionDescription?: string | null; + /** + * Output only. Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + */ + versionId?: string | null; + /** + * Output only. Timestamp when this version was most recently updated. + */ + versionUpdateTime?: string | null; } /** - * Manual batch tuning parameters. + * User input field to specify the base model source. Currently it only supports specifing the Model Garden models and Genie models. */ - export interface Schema$GoogleCloudAiplatformV1beta1ManualBatchTuningParameters { + export interface Schema$GoogleCloudAiplatformV1beta1ModelBaseModelSource { /** - * Immutable. The number of the records (e.g. instances) of the operation given in each batch to a machine replica. Machine type, and size of a single record should be considered when setting this parameter, higher value speeds up the batch operation's execution, but too high value will result in a whole batch not fitting in a machine's memory, and the whole operation will fail. The default value is 64. 
+ * Information about the base model of Genie models. */ - batchSize?: number | null; + genieSource?: Schema$GoogleCloudAiplatformV1beta1GenieSource; + /** + * Source information of Model Garden models. + */ + modelGardenSource?: Schema$GoogleCloudAiplatformV1beta1ModelGardenSource; } /** - * A message representing a Measurement of a Trial. A Measurement contains the Metrics got by executing a Trial using suggested hyperparameter values. + * Specification of a container for serving predictions. Some fields in this message correspond to fields in the [Kubernetes Container v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ - export interface Schema$GoogleCloudAiplatformV1beta1Measurement { + export interface Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec { /** - * Output only. Time that the Trial has been running at the point of this Measurement. + * Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify this field as an array of executable and arguments, similar to a Docker `CMD`'s "default parameters" form. If you don't specify this field but do specify the command field, then the command from the `command` field runs without any additional arguments. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). If you don't specify this field and don't specify the `command` field, then the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and `CMD` determine what runs based on their default behavior. See the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). 
In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `args` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ - elapsedDuration?: string | null; + args?: string[] | null; /** - * Output only. A list of metrics got by evaluating the objective functions using suggested Parameter values. + * Immutable. Specifies the command that runs when the container starts. This overrides the container's [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). Specify this field as an array of executable and arguments, similar to a Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not specify this field, then the container's `ENTRYPOINT` runs, in conjunction with the args field or the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either exists. If this field is not specified and the container does not have an `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). If you specify this field, then you can also specify the `args` field to provide additional arguments for this command. However, if you specify this field, then the container's `CMD` is ignored. 
See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `command` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ - metrics?: Schema$GoogleCloudAiplatformV1beta1MeasurementMetric[]; + command?: string[] | null; /** - * Output only. The number of steps the machine learning model has been trained for. Must be non-negative. + * Immutable. Deployment timeout. Limit for deployment timeout is 2 hours. */ - stepCount?: string | null; - } - /** - * A message representing a metric in the measurement. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MeasurementMetric { + deploymentTimeout?: string | null; /** - * Output only. The ID of the Metric. The Metric should be defined in StudySpec's Metrics. + * Immutable. List of environment variables to set in the container. After the container starts running, code running in the container can read these environment variables. Additionally, the command and args fields can reference these variables. Later entries in this list can also reference earlier entries. 
For example, the following example sets the variable `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": "foo" \}, { "name": "VAR_2", "value": "$(VAR_1) bar" \} ] ``` If you switch the order of the variables in the example, then the expansion does not occur. This field corresponds to the `env` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ - metricId?: string | null; + env?: Schema$GoogleCloudAiplatformV1beta1EnvVar[]; /** - * Output only. The value for this metric. + * Immutable. List of ports to expose from the container. Vertex AI sends gRPC prediction requests that it receives to the first port on this list. Vertex AI also sends liveness and health checks to this port. If you do not specify this field, gRPC requests to the container will be disabled. Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers v1 core API. */ - value?: number | null; - } - /** - * Request message for ModelService.MergeVersionAliases. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MergeVersionAliasesRequest { + grpcPorts?: Schema$GoogleCloudAiplatformV1beta1Port[]; /** - * Required. The set of version aliases to merge. The alias should be at most 128 characters, and match `a-z{0,126\}[a-z-0-9]`. Add the `-` prefix to an alias means removing that alias from the version. `-` is NOT counted in the 128 characters. Example: `-golden` means removing the `golden` alias from the version. There is NO ordering in aliases, which means 1) The aliases returned from GetModel API might not have the exactly same order from this MergeVersionAliases API. 2) Adding and deleting the same alias in the request is not recommended, and the 2 operations will be cancelled out. + * Immutable. Specification for Kubernetes readiness probe. 
*/ - versionAliases?: string[] | null; - } - /** - * Instance of a general MetadataSchema. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MetadataSchema { + healthProbe?: Schema$GoogleCloudAiplatformV1beta1Probe; /** - * Output only. Timestamp when this MetadataSchema was created. + * Immutable. HTTP path on the container to send health checks to. Vertex AI intermittently sends GET requests to this path on the container's IP address and port to check that the container is healthy. Read more about [health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). For example, if you set this field to `/bar`, then Vertex AI intermittently sends a GET request to the `/bar` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ - createTime?: string | null; + healthRoute?: string | null; /** - * Description of the Metadata Schema + * Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. This URI must identify an image in Artifact Registry or Container Registry. 
Learn more about the [container publishing requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), including permissions requirements for the Vertex AI Service Agent. The container image is ingested upon ModelService.UploadModel, stored internally, and this original path is afterwards not used. To learn about the requirements for the Docker image itself, see [Custom container requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). You can use the URI to one of Vertex AI's [pre-built container images for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) in this field. */ - description?: string | null; + imageUri?: string | null; /** - * Output only. The resource name of the MetadataSchema. + * Immutable. List of ports to expose from the container. Vertex AI sends any prediction requests that it receives to the first port on this list. Vertex AI also sends [liveness and health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) to this port. If you do not specify this field, it defaults to following value: ```json [ { "containerPort": 8080 \} ] ``` Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ - name?: string | null; + ports?: Schema$GoogleCloudAiplatformV1beta1Port[]; /** - * Required. The raw YAML string representation of the MetadataSchema. The combination of [MetadataSchema.version] and the schema name given by `title` in [MetadataSchema.schema] must be unique within a MetadataStore. The schema is defined as an OpenAPI 3.0.2 [MetadataSchema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject) + * Immutable. 
HTTP path on the container to send prediction requests to. Vertex AI forwards requests sent using projects.locations.endpoints.predict to this path on the container's IP address and port. Vertex AI then returns the container's response in the API response. For example, if you set this field to `/foo`, then when Vertex AI receives a prediction request, it forwards the request body in a POST request to the `/foo` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ - schema?: string | null; + predictRoute?: string | null; /** - * The type of the MetadataSchema. This is a property that identifies which metadata types will use the MetadataSchema. + * Immutable. The amount of the VM memory to reserve as the shared memory for the model in megabytes. */ - schemaType?: string | null; + sharedMemorySizeMb?: string | null; /** - * The version of the MetadataSchema. The version's format must match the following regular expression: `^[0-9]+.+.+$`, which would allow to order/compare different versions. Example: 1.0.0, 1.0.1, etc. + * Immutable. Specification for Kubernetes startup probe. 
*/ - schemaVersion?: string | null; + startupProbe?: Schema$GoogleCloudAiplatformV1beta1Probe; } /** - * Instance of a metadata store. Contains a set of metadata that can be queried. + * ModelDeploymentMonitoringBigQueryTable specifies the BigQuery table name as well as some information of the logs stored in this table. */ - export interface Schema$GoogleCloudAiplatformV1beta1MetadataStore { + export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringBigQueryTable { /** - * Output only. Timestamp when this MetadataStore was created. + * The created BigQuery table to store logs. Customer could do their own query & analysis. Format: `bq://.model_deployment_monitoring_._` */ - createTime?: string | null; + bigqueryTablePath?: string | null; /** - * Description of the MetadataStore. - */ - description?: string | null; - /** - * Customer-managed encryption key spec for a Metadata Store. If set, this Metadata Store and all sub-resources of this Metadata Store are secured using this key. - */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; - /** - * Output only. The resource name of the MetadataStore instance. + * The source of log. */ - name?: string | null; + logSource?: string | null; /** - * Output only. State information of the MetadataStore. + * The type of log. */ - state?: Schema$GoogleCloudAiplatformV1beta1MetadataStoreMetadataStoreState; + logType?: string | null; /** - * Output only. Timestamp when this MetadataStore was last updated. + * Output only. The schema version of the request/response logging BigQuery table. Default to v1 if unset. */ - updateTime?: string | null; + requestResponseLoggingSchemaVersion?: string | null; } /** - * Represents state information for a MetadataStore. + * Represents a job that runs periodically to monitor the deployed models in an endpoint. It will analyze the logged training & prediction data to detect any abnormal behaviors. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1MetadataStoreMetadataStoreState { + export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJob { /** - * The disk utilization of the MetadataStore in bytes. + * YAML schema file uri describing the format of a single instance that you want Tensorflow Data Validation (TFDV) to analyze. If this field is empty, all the feature data types are inferred from predict_instance_schema_uri, meaning that TFDV will use the data in the exact format(data type) as prediction request/response. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. */ - diskUtilizationBytes?: string | null; - } - /** - * Represents one resource that exists in automl.googleapis.com, datalabeling.googleapis.com or ml.googleapis.com. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigratableResource { + analysisInstanceSchemaUri?: string | null; /** - * Output only. Represents one Dataset in automl.googleapis.com. + * Output only. The created bigquery tables for the job under customer project. Customer could do their own query & analysis. There could be 4 log tables in maximum: 1. Training data logging predict request/response 2. Serving data logging predict request/response */ - automlDataset?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlDataset; + bigqueryTables?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringBigQueryTable[]; /** - * Output only. Represents one Model in automl.googleapis.com. + * Output only. Timestamp when this ModelDeploymentMonitoringJob was created. */ - automlModel?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlModel; + createTime?: string | null; /** - * Output only. Represents one Dataset in datalabeling.googleapis.com. + * Required. 
The user-defined name of the ModelDeploymentMonitoringJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a ModelDeploymentMonitoringJob. */ - dataLabelingDataset?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDataset; + displayName?: string | null; /** - * Output only. Timestamp when the last migration attempt on this MigratableResource started. Will not be set if there's no migration attempt on this MigratableResource. + * If true, the scheduled monitoring pipeline logs are sent to Google Cloud Logging, including pipeline status and anomalies detected. Please note the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging#pricing). */ - lastMigrateTime?: string | null; + enableMonitoringPipelineLogs?: boolean | null; /** - * Output only. Timestamp when this MigratableResource was last updated. + * Customer-managed encryption key spec for a ModelDeploymentMonitoringJob. If set, this ModelDeploymentMonitoringJob and all sub-resources of this ModelDeploymentMonitoringJob will be secured by this key. */ - lastUpdateTime?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Output only. Represents one Version in ml.googleapis.com. + * Required. Endpoint resource name. Format: `projects/{project\}/locations/{location\}/endpoints/{endpoint\}` */ - mlEngineModelVersion?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceMlEngineModelVersion; - } - /** - * Represents one Dataset in automl.googleapis.com. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlDataset { + endpoint?: string | null; /** - * Full resource name of automl Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}`. + * Output only. Only populated when the job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. 
*/ - dataset?: string | null; + error?: Schema$GoogleRpcStatus; /** - * The Dataset's display name in automl.googleapis.com. + * The labels with user-defined metadata to organize your ModelDeploymentMonitoringJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - datasetDisplayName?: string | null; - } - /** - * Represents one Model in automl.googleapis.com. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceAutomlModel { + labels?: {[key: string]: string} | null; /** - * Full resource name of automl Model. Format: `projects/{project\}/locations/{location\}/models/{model\}`. + * Output only. Latest triggered monitoring pipeline metadata. */ - model?: string | null; + latestMonitoringPipelineMetadata?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata; /** - * The Model's display name in automl.googleapis.com. + * Required. Sample Strategy for logging. */ - modelDisplayName?: string | null; - } - /** - * Represents one Dataset in datalabeling.googleapis.com. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDataset { + loggingSamplingStrategy?: Schema$GoogleCloudAiplatformV1beta1SamplingStrategy; /** - * The migratable AnnotatedDataset in datalabeling.googleapis.com belongs to the data labeling Dataset. + * The TTL of BigQuery tables in user projects which stores logs. A day is the basic unit of the TTL and we take the ceil of TTL/86400(a day). e.g. { second: 3600\} indicates ttl = 1 day. */ - dataLabelingAnnotatedDatasets?: Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset[]; + logTtl?: string | null; /** - * Full resource name of data labeling Dataset. 
Format: `projects/{project\}/datasets/{dataset\}`. + * Required. The config for monitoring objectives. This is a per DeployedModel config. Each DeployedModel needs to be configured separately. */ - dataset?: string | null; + modelDeploymentMonitoringObjectiveConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringObjectiveConfig[]; /** - * The Dataset's display name in datalabeling.googleapis.com. + * Required. Schedule config for running the monitoring job. */ - datasetDisplayName?: string | null; - } - /** - * Represents one AnnotatedDataset in datalabeling.googleapis.com. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset { + modelDeploymentMonitoringScheduleConfig?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringScheduleConfig; /** - * Full resource name of data labeling AnnotatedDataset. Format: `projects/{project\}/datasets/{dataset\}/annotatedDatasets/{annotated_dataset\}`. + * Alert config for model monitoring. */ - annotatedDataset?: string | null; + modelMonitoringAlertConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfig; /** - * The AnnotatedDataset's display name in datalabeling.googleapis.com. + * Output only. Resource name of a ModelDeploymentMonitoringJob. */ - annotatedDatasetDisplayName?: string | null; - } - /** - * Represents one model Version in ml.googleapis.com. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigratableResourceMlEngineModelVersion { + name?: string | null; /** - * The ml.googleapis.com endpoint that this model Version currently lives in. Example values: * ml.googleapis.com * us-centrall-ml.googleapis.com * europe-west4-ml.googleapis.com * asia-east1-ml.googleapis.com + * Output only. Timestamp when this monitoring pipeline will be scheduled to run for the next round. */ - endpoint?: string | null; + nextScheduleTime?: string | null; /** - * Full resource name of ml engine model Version. 
Format: `projects/{project\}/models/{model\}/versions/{version\}`. + * YAML schema file uri describing the format of a single instance, which are given to format this Endpoint's prediction (and explanation). If not set, we will generate predict schema from collected predict requests. */ - version?: string | null; - } - /** - * Config of migrating one resource from automl.googleapis.com, datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequest { + predictInstanceSchemaUri?: string | null; /** - * Config for migrating Dataset in automl.googleapis.com to Vertex AI's Dataset. + * Sample Predict instance, same format as PredictRequest.instances, this can be set as a replacement of ModelDeploymentMonitoringJob.predict_instance_schema_uri. If not set, we will generate predict schema from collected predict requests. */ - migrateAutomlDatasetConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlDatasetConfig; + samplePredictInstance?: any | null; /** - * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. + * Output only. Schedule state when the monitoring job is in Running state. */ - migrateAutomlModelConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlModelConfig; + scheduleState?: string | null; /** - * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's Dataset. + * Output only. The detailed state of the monitoring job. When the job is still creating, the state will be 'PENDING'. Once the job is successfully created, the state will be 'RUNNING'. Pause the job, the state will be 'PAUSED'. Resume the job, the state will return to 'RUNNING'. */ - migrateDataLabelingDatasetConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfig; + state?: string | null; /** - * Config for migrating Version in ml.googleapis.com to Vertex AI's Model. 
+ * Stats anomalies base folder path. */ - migrateMlEngineModelVersionConfig?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateMlEngineModelVersionConfig; + statsAnomaliesBaseDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + /** + * Output only. Timestamp when this ModelDeploymentMonitoringJob was updated most recently. + */ + updateTime?: string | null; } /** - * Config for migrating Dataset in automl.googleapis.com to Vertex AI's Dataset. + * All metadata of most recent monitoring pipelines. */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlDatasetConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { /** - * Required. Full resource name of automl Dataset. Format: `projects/{project\}/locations/{location\}/datasets/{dataset\}`. + * The time that most recent monitoring pipelines that is related to this run. */ - dataset?: string | null; + runTime?: string | null; /** - * Required. Display name of the Dataset in Vertex AI. System will pick a display name if unspecified. + * The status of the most recent monitoring pipeline. */ - datasetDisplayName?: string | null; + status?: Schema$GoogleRpcStatus; } /** - * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. + * ModelDeploymentMonitoringObjectiveConfig contains the pair of deployed_model_id to ModelMonitoringObjectiveConfig. */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateAutomlModelConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringObjectiveConfig { /** - * Required. Full resource name of automl Model. Format: `projects/{project\}/locations/{location\}/models/{model\}`. + * The DeployedModel ID of the objective config. */ - model?: string | null; + deployedModelId?: string | null; /** - * Optional. Display name of the model in Vertex AI. 
System will pick a display name if unspecified. + * The objective config of for the modelmonitoring job of this deployed model. */ - modelDisplayName?: string | null; + objectiveConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfig; } /** - * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's Dataset. + * The config for scheduling monitoring job. */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfig { - /** - * Required. Full resource name of data labeling Dataset. Format: `projects/{project\}/datasets/{dataset\}`. - */ - dataset?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringScheduleConfig { /** - * Optional. Display name of the Dataset in Vertex AI. System will pick a display name if unspecified. + * Required. The model monitoring job scheduling interval. It will be rounded up to next full hour. This defines how often the monitoring jobs are triggered. */ - datasetDisplayName?: string | null; + monitorInterval?: string | null; /** - * Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com to Vertex AI's SavedQuery. The specified AnnotatedDatasets have to belong to the datalabeling Dataset. + * The time window of the prediction data being included in each prediction dataset. This window specifies how long the data should be collected from historical model results for each run. If not set, ModelDeploymentMonitoringScheduleConfig.monitor_interval will be used. e.g. If currently the cutoff time is 2022-01-08 14:30:00 and the monitor_window is set to be 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 14:30:00 will be retrieved and aggregated to calculate the monitoring statistics. 
*/ - migrateDataLabelingAnnotatedDatasetConfigs?: Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig[]; + monitorWindow?: string | null; } /** - * Config for migrating AnnotatedDataset in datalabeling.googleapis.com to Vertex AI's SavedQuery. + * A collection of metrics calculated by comparing Model's predictions on all of the test data against annotations from the test data. */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluation { /** - * Required. Full resource name of data labeling AnnotatedDataset. Format: `projects/{project\}/datasets/{dataset\}/annotatedDatasets/{annotated_dataset\}`. + * Specify the configuration for bias detection. */ - annotatedDataset?: string | null; - } - /** - * Config for migrating version in ml.googleapis.com to Vertex AI's Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceRequestMigrateMlEngineModelVersionConfig { + biasConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationBiasConfig; /** - * Required. The ml.googleapis.com endpoint that this model version should be migrated from. Example values: * ml.googleapis.com * us-centrall-ml.googleapis.com * europe-west4-ml.googleapis.com * asia-east1-ml.googleapis.com + * Output only. Timestamp when this ModelEvaluation was created. */ - endpoint?: string | null; + createTime?: string | null; /** - * Required. Display name of the model in Vertex AI. System will pick a display name if unspecified. + * The display name of the ModelEvaluation. */ - modelDisplayName?: string | null; + displayName?: string | null; /** - * Required. Full resource name of ml engine model version. Format: `projects/{project\}/models/{model\}/versions/{version\}`. 
+ * Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data. */ - modelVersion?: string | null; - } - /** - * Describes a successfully migrated resource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MigrateResourceResponse { + explanationSpecs?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationModelEvaluationExplanationSpec[]; /** - * Migrated Dataset's resource name. + * The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from Managed Pipeline, metadata contains a structured value with keys of "pipeline_job_id", "evaluation_dataset_type", "evaluation_dataset_path", "row_based_metrics_path". */ - dataset?: string | null; + metadata?: any | null; /** - * Before migration, the identifier in ml.googleapis.com, automl.googleapis.com or datalabeling.googleapis.com. + * Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri */ - migratableResource?: Schema$GoogleCloudAiplatformV1beta1MigratableResource; + metrics?: any | null; /** - * Migrated Model's resource name. + * Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ - model?: string | null; - } - /** - * A trained machine learning Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Model { + metricsSchemaUri?: string | null; /** - * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not required for AutoML Models. + * Aggregated explanation metrics for the Model's prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for AutoML tabular Models. 
*/ - artifactUri?: string | null; + modelExplanation?: Schema$GoogleCloudAiplatformV1beta1ModelExplanation; /** - * Optional. User input field to specify the base model source. Currently it only supports specifing the Model Garden models and Genie models. + * Output only. The resource name of the ModelEvaluation. */ - baseModelSource?: Schema$GoogleCloudAiplatformV1beta1ModelBaseModelSource; + name?: string | null; /** - * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not required for AutoML Models. + * All possible dimensions of ModelEvaluationSlices. The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `. */ - containerSpec?: Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec; + sliceDimensions?: string[] | null; + } + /** + * Configuration for bias detection. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationBiasConfig { /** - * Output only. Timestamp when this Model was uploaded into Vertex AI. - */ - createTime?: string | null; - /** - * Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. - */ - deployedModels?: Schema$GoogleCloudAiplatformV1beta1DeployedModelRef[]; - /** - * The description of the Model. - */ - description?: string | null; - /** - * Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. - */ - displayName?: string | null; - /** - * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + * Specification for how the data should be sliced for bias. It contains a list of slices, with limitation of two slices. 
The first slice of data will be the slice_a. The second slice in the list (slice_b) will be compared against the first slice. If only a single slice is provided, then slice_a will be compared against "not slice_a". Below are examples with feature "education" with value "low", "medium", "high" in the dataset: Example 1: bias_slices = [{'education': 'low'\}] A single slice provided. In this case, slice_a is the collection of data with 'education' equals 'low', and slice_b is the collection of data with 'education' equals 'medium' or 'high'. Example 2: bias_slices = [{'education': 'low'\}, {'education': 'high'\}] Two slices provided. In this case, slice_a is the collection of data with 'education' equals 'low', and slice_b is the collection of data with 'education' equals 'high'. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + biasSlices?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpec; /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Positive labels selection on the target field. */ - etag?: string | null; + labels?: string[] | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationModelEvaluationExplanationSpec { /** - * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + * Explanation spec details. 
*/ explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; /** - * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` */ - labels?: {[key: string]: string} | null; + explanationType?: string | null; + } + /** + * A collection of metrics calculated by comparing Model's predictions on a slice of the test data against ground truth annotations. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSlice { /** - * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + * Output only. Timestamp when this ModelEvaluationSlice was created. */ - metadata?: any | null; + createTime?: string | null; /** - * Output only. The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project\}/locations/{location\}/metadataStores/{metadata_store\}/artifacts/{artifact\}`. + * Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri */ - metadataArtifact?: string | null; + metrics?: any | null; /** - * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). 
AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * Output only. Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ - metadataSchemaUri?: string | null; + metricsSchemaUri?: string | null; /** - * Output only. Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or saved and tuned from Genie or Model Garden. + * Output only. Aggregated explanation metrics for the Model's prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for tabular Models. */ - modelSourceInfo?: Schema$GoogleCloudAiplatformV1beta1ModelSourceInfo; + modelExplanation?: Schema$GoogleCloudAiplatformV1beta1ModelExplanation; /** - * The resource name of the Model. + * Output only. The resource name of the ModelEvaluationSlice. */ name?: string | null; /** - * Output only. If this Model is a copy of another Model, this contains info about the original. + * Output only. The slice of the test data that is used to evaluate the Model. */ - originalModelInfo?: Schema$GoogleCloudAiplatformV1beta1ModelOriginalModelInfo; + slice?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSlice; + } + /** + * Definition of a slice. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSlice { /** - * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. 
+ * Output only. The dimension of the slice. Well-known dimensions are: * `annotationSpec`: This slice is on the test data that has either ground truth or prediction with AnnotationSpec.display_name equals to value. * `slice`: This slice is a user customized slice defined by its SliceSpec. */ - predictSchemata?: Schema$GoogleCloudAiplatformV1beta1PredictSchemata; + dimension?: string | null; /** - * Output only. When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + * Output only. Specification for how the data was sliced. */ - supportedDeploymentResourcesTypes?: string[] | null; + sliceSpec?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpec; /** - * Output only. The formats in which this Model may be exported. If empty, this Model is not available for export. + * Output only. The value of the dimension in this slice. */ - supportedExportFormats?: Schema$GoogleCloudAiplatformV1beta1ModelExportFormat[]; + value?: string | null; + } + /** + * Specification for how the data should be sliced. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpec { /** - * Output only. The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. 
* `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + * Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by "instance" as a dictionary prefix for Vertex Batch Predictions output format. */ - supportedInputStorageFormats?: string[] | null; + configs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecSliceConfig; + } | null; + } + /** + * A range of values for slice(s). `low` is inclusive, `high` is exclusive. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecRange { /** - * Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. 
The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + * Exclusive high value for the range. */ - supportedOutputStorageFormats?: string[] | null; + high?: number | null; /** - * Output only. The resource name of the TrainingPipeline that uploaded this Model, if any. + * Inclusive low value for the range. */ - trainingPipeline?: string | null; + low?: number | null; + } + /** + * Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with feature zip_code with values 12345, 23334, 88888 and feature country with values "US", "Canada", "Mexico" in the dataset: Example 1: { "zip_code": { "value": { "float_value": 12345.0 \} \} \} A single slice for any data with zip_code 12345 in the dataset. Example 2: { "zip_code": { "range": { "low": 12345, "high": 20000 \} \} \} A single slice containing data where the zip_codes between 12345 and 20000 For this example, data with the zip_code of 12345 will be in this slice. Example 3: { "zip_code": { "range": { "low": 10000, "high": 20000 \} \}, "country": { "value": { "string_value": "US" \} \} \} A single slice containing data where the zip_codes between 10000 and 20000 has the country "US". For this example, data with the zip_code of 12345 and country "US" will be in this slice. 
Example 4: { "country": {"all_values": { "value": true \} \} \} Three slices are computed, one for each unique country in the dataset. Example 5: { "country": { "all_values": { "value": true \} \}, "zip_code": { "value": { "float_value": 12345.0 \} \} \} Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country "US" will be in one slice, zip_code 12345 and country "Canada" in another slice, and zip_code 12345 and country "Mexico" in another slice, totaling 3 slices. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecSliceConfig { /** - * Output only. Timestamp when this Model was most recently updated. + * If all_values is set to true, then all possible labels of the keyed feature will have another slice computed. Example: `{"all_values":{"value":true\}\}` */ - updateTime?: string | null; + allValues?: boolean | null; /** - * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project\}/locations/{location\}/models/{model_id\}@{version_alias\}` instead of auto-generated version id (i.e. `projects/{project\}/locations/{location\}/models/{model_id\}@{version_id\})`. The format is a-z{0,126\}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + * A range of values for a numerical feature. Example: `{"range":{"low":10000.0,"high":50000.0\}\}` will capture 12345 and 23334 in the slice. */ - versionAliases?: string[] | null; + range?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecRange; /** - * Output only. Timestamp when this version was created. + * A unique specific value for a given feature. 
Example: `{ "value": { "string_value": "12345" \} \}` */ - versionCreateTime?: string | null; + value?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecValue; + } + /** + * Single value that supports strings and floats. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecValue { /** - * The description of this version. + * Float type. */ - versionDescription?: string | null; + floatValue?: number | null; /** - * Output only. Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + * String type. */ - versionId?: string | null; + stringValue?: string | null; + } + /** + * Aggregated explanation metrics for a Model over a set of instances. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelExplanation { /** - * Output only. Timestamp when this version was most recently updated. + * Output only. Aggregated attributions explaining the Model's prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated. 
*/ - versionUpdateTime?: string | null; + meanAttributions?: Schema$GoogleCloudAiplatformV1beta1Attribution[]; } /** - * User input field to specify the base model source. Currently it only supports specifing the Model Garden models and Genie models. + * Represents export format supported by the Model. All formats export to Google Cloud Storage. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelBaseModelSource { + export interface Schema$GoogleCloudAiplatformV1beta1ModelExportFormat { /** - * Information about the base model of Genie models. + * Output only. The content of this Model that may be exported. */ - genieSource?: Schema$GoogleCloudAiplatformV1beta1GenieSource; + exportableContents?: string[] | null; /** - * Source information of Model Garden models. + * Output only. The ID of the export format. The possible format IDs are: * `tflite` Used for Android mobile devices. * `edgetpu-tflite` Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices. * `tf-saved-model` A tensorflow model in SavedModel format. * `tf-js` A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in the browser and in Node.js using JavaScript. * `core-ml` Used for iOS mobile devices. * `custom-trained` A Model that was uploaded or trained by custom code. */ - modelGardenSource?: Schema$GoogleCloudAiplatformV1beta1ModelGardenSource; + id?: string | null; } /** - * Specification of a container for serving predictions. Some fields in this message correspond to fields in the [Kubernetes Container v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + * Contains information about the source of the models generated from Model Garden. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ModelGardenSource { /** - * Immutable. Specifies arguments for the command that runs when the container starts. 
This overrides the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify this field as an array of executable and arguments, similar to a Docker `CMD`'s "default parameters" form. If you don't specify this field but do specify the command field, then the command from the `command` field runs without any additional arguments. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). If you don't specify this field and don't specify the `command` field, then the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and `CMD` determine what runs based on their default behavior. See the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `args` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + * Required. The model garden source model resource name. 
*/ - args?: string[] | null; + publicModelName?: string | null; + } + /** + * Vertex AI Model Monitoring Service serves as a central hub for the analysis and visualization of data quality and performance related to models. ModelMonitor stands as a top level resource for overseeing your model monitoring tasks. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitor { /** - * Immutable. Specifies the command that runs when the container starts. This overrides the container's [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). Specify this field as an array of executable and arguments, similar to a Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not specify this field, then the container's `ENTRYPOINT` runs, in conjunction with the args field or the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either exists. If this field is not specified and the container does not have an `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). If you specify this field, then you can also specify the `args` field to provide additional arguments for this command. However, if you specify this field, then the container's `CMD` is ignored. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). In this field, you can reference [environment variables set by Vertex AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. 
In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `command` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + * Output only. Timestamp when this ModelMonitor was created. */ - command?: string[] | null; + createTime?: string | null; /** - * Immutable. Deployment timeout. Limit for deployment timeout is 2 hours. + * The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8. */ - deploymentTimeout?: string | null; + displayName?: string | null; /** - * Immutable. List of environment variables to set in the container. After the container starts running, code running in the container can read these environment variables. Additionally, the command and args fields can reference these variables. Later entries in this list can also reference earlier entries. For example, the following example sets the variable `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": "foo" \}, { "name": "VAR_2", "value": "$(VAR_1) bar" \} ] ``` If you switch the order of the variables in the example, then the expansion does not occur. This field corresponds to the `env` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + * Optional model explanation spec. It is used for feature attribution monitoring. */ - env?: Schema$GoogleCloudAiplatformV1beta1EnvVar[]; + explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; /** - * Immutable. List of ports to expose from the container. 
Vertex AI sends gRPC prediction requests that it receives to the first port on this list. Vertex AI also sends liveness and health checks to this port. If you do not specify this field, gRPC requests to the container will be disabled. Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers v1 core API. + * Monitoring Schema is to specify the model's features, prediction outputs and ground truth properties. It is used to extract pertinent data from the dataset and to process features based on their properties. Make sure that the schema aligns with your dataset, if it does not, we will be unable to extract data from the dataset. It is required for most models, but optional for Vertex AI AutoML Tables unless the schem information is not available. */ - grpcPorts?: Schema$GoogleCloudAiplatformV1beta1Port[]; + modelMonitoringSchema?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchema; /** - * Immutable. Specification for Kubernetes readiness probe. + * The entity that is subject to analysis. Currently only models in Vertex AI Model Registry are supported. If you want to analyze the model which is outside the Vertex AI, you could register a model in Vertex AI Model Registry using just a display name. */ - healthProbe?: Schema$GoogleCloudAiplatformV1beta1Probe; + modelMonitoringTarget?: Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTarget; /** - * Immutable. HTTP path on the container to send health checks to. Vertex AI intermittently sends GET requests to this path on the container's IP address and port to check that the container is healthy. Read more about [health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). 
For example, if you set this field to `/bar`, then Vertex AI intermittently sends a GET request to the `/bar` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + * Immutable. Resource name of the ModelMonitor. Format: `projects/{project\}/locations/{location\}/modelMonitors/{model_monitor\}`. */ - healthRoute?: string | null; + name?: string | null; /** - * Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. This URI must identify an image in Artifact Registry or Container Registry. Learn more about the [container publishing requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), including permissions requirements for the Vertex AI Service Agent. The container image is ingested upon ModelService.UploadModel, stored internally, and this original path is afterwards not used. To learn about the requirements for the Docker image itself, see [Custom container requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). 
You can use the URI to one of Vertex AI's [pre-built container images for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) in this field. + * Optional default notification spec, it can be overridden in the ModelMonitoringJob notification spec. */ - imageUri?: string | null; + notificationSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpec; /** - * Immutable. List of ports to expose from the container. Vertex AI sends any prediction requests that it receives to the first port on this list. Vertex AI also sends [liveness and health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) to this port. If you do not specify this field, it defaults to following value: ```json [ { "containerPort": 8080 \} ] ``` Vertex AI does not use ports other than the first one listed. This field corresponds to the `ports` field of the Kubernetes Containers [v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). + * Optional default monitoring metrics/logs export spec, it can be overridden in the ModelMonitoringJob output spec. If not specified, a default Google Cloud Storage bucket will be created under your project. */ - ports?: Schema$GoogleCloudAiplatformV1beta1Port[]; + outputSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringOutputSpec; /** - * Immutable. HTTP path on the container to send prediction requests to. Vertex AI forwards requests sent using projects.locations.endpoints.predict to this path on the container's IP address and port. Vertex AI then returns the container's response in the API response. For example, if you set this field to `/foo`, then when Vertex AI receives a prediction request, it forwards the request body in a POST request to the `/foo` path on the port of your container specified by the first value of this `ModelContainerSpec`'s ports field. 
If you don't specify this field, it defaults to the following value when you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: * ENDPOINT: The last segment (following `endpoints/`)of the Endpoint.name][] field of the Endpoint where this Model has been deployed. (Vertex AI makes this value available to your container code as the [`AIP_ENDPOINT_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` environment variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) + * Optional default tabular model monitoring objective. */ - predictRoute?: string | null; + tabularObjective?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecTabularObjective; /** - * Immutable. The amount of the VM memory to reserve as the shared memory for the model in megabytes. + * Optional training dataset used to train the model. It can serve as a reference dataset to identify changes in production. */ - sharedMemorySizeMb?: string | null; + trainingDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput; /** - * Immutable. Specification for Kubernetes startup probe. + * Output only. Timestamp when this ModelMonitor was updated most recently. */ - startupProbe?: Schema$GoogleCloudAiplatformV1beta1Probe; + updateTime?: string | null; } /** - * ModelDeploymentMonitoringBigQueryTable specifies the BigQuery table name as well as some information of the logs stored in this table. + * Represents a single monitoring alert. This is currently used in the SearchModelMonitoringAlerts api, thus the alert wrapped in this message belongs to the resource asked in the request. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringBigQueryTable { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlert { /** - * The created BigQuery table to store logs. Customer could do their own query & analysis. Format: `bq://.model_deployment_monitoring_._` + * Alert creation time. */ - bigqueryTablePath?: string | null; + alertTime?: string | null; /** - * The source of log. + * Anomaly details. */ - logSource?: string | null; + anomaly?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomaly; /** - * The type of log. + * One of the supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` */ - logType?: string | null; + objectiveType?: string | null; /** - * Output only. The schema version of the request/response logging BigQuery table. Default to v1 if unset. + * The stats name. */ - requestResponseLoggingSchemaVersion?: string | null; + statsName?: string | null; } /** - * Represents a job that runs periodically to monitor the deployed models in an endpoint. It will analyze the logged training & prediction data to detect any abnormal behaviors. + * Monitoring alert triggered condition. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJob { - /** - * YAML schema file uri describing the format of a single instance that you want Tensorflow Data Validation (TFDV) to analyze. If this field is empty, all the feature data types are inferred from predict_instance_schema_uri, meaning that TFDV will use the data in the exact format(data type) as prediction request/response. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. 
- */ - analysisInstanceSchemaUri?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition { /** - * Output only. The created bigquery tables for the job under customer project. Customer could do their own query & analysis. There could be 4 log tables in maximum: 1. Training data logging predict request/response 2. Serving data logging predict request/response + * A condition that compares a stats value against a threshold. Alert will be triggered if value above the threshold. */ - bigqueryTables?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringBigQueryTable[]; + threshold?: number | null; + } + /** + * The alert config for model monitoring. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfig { /** - * Output only. Timestamp when this ModelDeploymentMonitoringJob was created. + * Email alert config. */ - createTime?: string | null; + emailAlertConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfigEmailAlertConfig; /** - * Required. The user-defined name of the ModelDeploymentMonitoringJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. Display name of a ModelDeploymentMonitoringJob. + * Dump the anomalies to Cloud Logging. The anomalies will be put to json payload encoded from proto google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry. This can be further sinked to Pub/Sub or any other services supported by Cloud Logging. */ - displayName?: string | null; + enableLogging?: boolean | null; /** - * If true, the scheduled monitoring pipeline logs are sent to Google Cloud Logging, including pipeline status and anomalies detected. Please note the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging#pricing). + * Resource names of the NotificationChannels to send alert. 
Must be of the format `projects//notificationChannels/` */ - enableMonitoringPipelineLogs?: boolean | null; + notificationChannels?: string[] | null; + } + /** + * The config for email alert. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfigEmailAlertConfig { /** - * Customer-managed encryption key spec for a ModelDeploymentMonitoringJob. If set, this ModelDeploymentMonitoringJob and all sub-resources of this ModelDeploymentMonitoringJob will be secured by this key. + * The email addresses to send the alert. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + userEmails?: string[] | null; + } + /** + * Represents a single model monitoring anomaly. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomaly { /** - * Required. Endpoint resource name. Format: `projects/{project\}/locations/{location\}/endpoints/{endpoint\}` + * Algorithm used to calculated the metrics, eg: jensen_shannon_divergence, l_infinity. */ - endpoint?: string | null; + algorithm?: string | null; /** - * Output only. Only populated when the job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * Model monitoring job resource name. */ - error?: Schema$GoogleRpcStatus; + modelMonitoringJob?: string | null; /** - * The labels with user-defined metadata to organize your ModelDeploymentMonitoringJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Tabular anomaly. */ - labels?: {[key: string]: string} | null; + tabularAnomaly?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomalyTabularAnomaly; + } + /** + * Tabular anomaly details. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomalyTabularAnomaly { /** - * Output only. 
Latest triggered monitoring pipeline metadata. + * Anomaly body. */ - latestMonitoringPipelineMetadata?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata; + anomaly?: any | null; /** - * Required. Sample Strategy for logging. + * Additional anomaly information. e.g. Google Cloud Storage uri. */ - loggingSamplingStrategy?: Schema$GoogleCloudAiplatformV1beta1SamplingStrategy; + anomalyUri?: string | null; /** - * The TTL of BigQuery tables in user projects which stores logs. A day is the basic unit of the TTL and we take the ceil of TTL/86400(a day). e.g. { second: 3600\} indicates ttl = 1 day. + * The alert condition associated with this anomaly. */ - logTtl?: string | null; + condition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; /** - * Required. The config for monitoring objectives. This is a per DeployedModel config. Each DeployedModel needs to be configured separately. + * Overview of this anomaly. */ - modelDeploymentMonitoringObjectiveConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringObjectiveConfig[]; + summary?: string | null; /** - * Required. Schedule config for running the monitoring job. + * The time the anomaly was triggered. */ - modelDeploymentMonitoringScheduleConfig?: Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringScheduleConfig; + triggerTime?: string | null; + } + /** + * The model monitoring configuration used for Batch Prediction Job. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringConfig { /** - * Alert config for model monitoring. + * Model monitoring alert config. */ - modelMonitoringAlertConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfig; + alertConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfig; /** - * Output only. Resource name of a ModelDeploymentMonitoringJob. 
+ * YAML schema file uri in Cloud Storage describing the format of a single instance that you want Tensorflow Data Validation (TFDV) to analyze. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. */ - name?: string | null; + analysisInstanceSchemaUri?: string | null; /** - * Output only. Timestamp when this monitoring pipeline will be scheduled to run for the next round. + * Model monitoring objective config. */ - nextScheduleTime?: string | null; + objectiveConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfig[]; /** - * YAML schema file uri describing the format of a single instance, which are given to format this Endpoint's prediction (and explanation). If not set, we will generate predict schema from collected predict requests. + * A Google Cloud Storage location for batch prediction model monitoring to dump statistics and anomalies. If not provided, a folder will be created in customer project to hold statistics and anomalies. */ - predictInstanceSchemaUri?: string | null; + statsAnomaliesBaseDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + } + /** + * Model monitoring data input spec. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput { /** - * Sample Predict instance, same format as PredictRequest.instances, this can be set as a replacement of ModelDeploymentMonitoringJob.predict_instance_schema_uri. If not set, we will generate predict schema from collected predict requests. + * Vertex AI Batch prediction Job. */ - samplePredictInstance?: any | null; + batchPredictionOutput?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputBatchPredictionOutput; /** - * Output only. Schedule state when the monitoring job is in Running state. + * Columnized dataset. 
*/ - scheduleState?: string | null; + columnizedDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDataset; /** - * Output only. The detailed state of the monitoring job. When the job is still creating, the state will be 'PENDING'. Once the job is successfully created, the state will be 'RUNNING'. Pause the job, the state will be 'PAUSED'. Resume the job, the state will return to 'RUNNING'. + * The time interval (pair of start_time and end_time) for which results should be returned. */ - state?: string | null; + timeInterval?: Schema$GoogleTypeInterval; /** - * Stats anomalies base folder path. + * The time offset setting for which results should be returned. */ - statsAnomaliesBaseDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + timeOffset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputTimeOffset; /** - * Output only. Timestamp when this ModelDeploymentMonitoringJob was updated most recently. + * Vertex AI Endpoint request & response logging. */ - updateTime?: string | null; + vertexEndpointLogs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputVertexEndpointLogs; } /** - * All metadata of most recent monitoring pipelines. + * Data from Vertex AI Batch prediction job output. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputBatchPredictionOutput { /** - * The time that most recent monitoring pipelines that is related to this run. + * Vertex AI Batch prediction job resource name. The job must match the model version specified in [ModelMonitor].[model_monitoring_target]. */ - runTime?: string | null; + batchPredictionJob?: string | null; + } + /** + * Input dataset spec. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDataset { /** - * The status of the most recent monitoring pipeline. + * BigQuery data source. 
*/ - status?: Schema$GoogleRpcStatus; + bigquerySource?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringBigQuerySource; + /** + * Google Cloud Storage data source. + */ + gcsSource?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringGcsSource; + /** + * The timestamp field. Usually for serving data. + */ + timestampField?: string | null; + /** + * Resource name of the Vertex AI managed dataset. + */ + vertexDataset?: string | null; } /** - * ModelDeploymentMonitoringObjectiveConfig contains the pair of deployed_model_id to ModelMonitoringObjectiveConfig. + * Dataset spec for data sotred in BigQuery. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringObjectiveConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringBigQuerySource { /** - * The DeployedModel ID of the objective config. + * Standard SQL to be used instead of the `table_uri`. */ - deployedModelId?: string | null; + query?: string | null; /** - * The objective config of for the modelmonitoring job of this deployed model. + * BigQuery URI to a table, up to 2000 characters long. All the columns in the table will be selected. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. */ - objectiveConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfig; + tableUri?: string | null; } /** - * The config for scheduling monitoring job. + * Dataset spec for data stored in Google Cloud Storage. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelDeploymentMonitoringScheduleConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringGcsSource { /** - * Required. The model monitoring job scheduling interval. It will be rounded up to next full hour. This defines how often the monitoring jobs are triggered. 
+ * Data format of the dataset. */ - monitorInterval?: string | null; + format?: string | null; /** - * The time window of the prediction data being included in each prediction dataset. This window specifies how long the data should be collected from historical model results for each run. If not set, ModelDeploymentMonitoringScheduleConfig.monitor_interval will be used. e.g. If currently the cutoff time is 2022-01-08 14:30:00 and the monitor_window is set to be 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 14:30:00 will be retrieved and aggregated to calculate the monitoring statistics. + * Google Cloud Storage URI to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. */ - monitorWindow?: string | null; + gcsUri?: string | null; } /** - * A collection of metrics calculated by comparing Model's predictions on all of the test data against annotations from the test data. + * Time offset setting. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluation { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputTimeOffset { /** - * Specify the configuration for bias detection. + * [offset] is the time difference from the cut-off time. For scheduled jobs, the cut-off time is the scheduled time. For non-scheduled jobs, it's the time when the job was created. Currently we support the following format: 'w|W': Week, 'd|D': Day, 'h|H': Hour E.g. '1h' stands for 1 hour, '2d' stands for 2 days. */ - biasConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationBiasConfig; + offset?: string | null; /** - * Output only. Timestamp when this ModelEvaluation was created. + * [window] refers to the scope of data selected for analysis. It allows you to specify the quantity of data you wish to examine. Currently we support the following format: 'w|W': Week, 'd|D': Day, 'h|H': Hour E.g. '1h' stands for 1 hour, '2d' stands for 2 days. 
+ */ + window?: string | null; + } + /** + * Data from Vertex AI Endpoint request response logging. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputVertexEndpointLogs { + /** + * List of endpoint resource names. The endpoints must enable the logging with the [Endpoint].[request_response_logging_config], and must contain the deployed model corresponding to the model version specified in [ModelMonitor].[model_monitoring_target]. + */ + endpoints?: string[] | null; + } + /** + * Represents a model monitoring job that analyze dataset using different monitoring algorithm. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJob { + /** + * Output only. Timestamp when this ModelMonitoringJob was created. */ createTime?: string | null; /** - * The display name of the ModelEvaluation. + * The display name of the ModelMonitoringJob. The name can be up to 128 characters long and can consist of any UTF-8. */ displayName?: string | null; /** - * Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data. + * Output only. Execution results for all the monitoring objectives. */ - explanationSpecs?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationModelEvaluationExplanationSpec[]; + jobExecutionDetail?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetail; /** - * The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from Managed Pipeline, metadata contains a structured value with keys of "pipeline_job_id", "evaluation_dataset_type", "evaluation_dataset_path", "row_based_metrics_path". + * Monitoring monitoring job spec. It outlines the specifications for monitoring objectives, notifications, and result exports. If left blank, the default monitoring specifications from the top-level resource 'ModelMonitor' will be applied. If provided, we will use the specification defined here rather than the default one. 
*/ - metadata?: any | null; + modelMonitoringSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSpec; /** - * Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri + * Output only. Resource name of a ModelMonitoringJob. Format: `projects/{project_id\}/locations/{location_id\}/modelMonitors/{model_monitor_id\}/modelMonitoringJobs/{model_monitoring_job_id\}` */ - metrics?: any | null; + name?: string | null; /** - * Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). + * Output only. Schedule resource name. It will only appear when this job is triggered by a schedule. */ - metricsSchemaUri?: string | null; + schedule?: string | null; /** - * Aggregated explanation metrics for the Model's prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for AutoML tabular Models. + * Output only. Timestamp when this ModelMonitoringJob was scheduled. It will only appear when this job is triggered by a schedule. */ - modelExplanation?: Schema$GoogleCloudAiplatformV1beta1ModelExplanation; + scheduleTime?: string | null; /** - * Output only. The resource name of the ModelEvaluation. + * Output only. The state of the monitoring job. * When the job is still creating, the state will be 'JOB_STATE_PENDING'. * Once the job is successfully created, the state will be 'JOB_STATE_RUNNING'. * Once the job is finished, the state will be one of 'JOB_STATE_FAILED', 'JOB_STATE_SUCCEEDED', 'JOB_STATE_PARTIALLY_SUCCEEDED'. */ - name?: string | null; + state?: string | null; /** - * All possible dimensions of ModelEvaluationSlices. The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `. + * Output only. 
Timestamp when this ModelMonitoringJob was updated most recently. */ - sliceDimensions?: string[] | null; + updateTime?: string | null; } /** - * Configuration for bias detection. + * Represent the execution details of the job. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationBiasConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetail { /** - * Specification for how the data should be sliced for bias. It contains a list of slices, with limitation of two slices. The first slice of data will be the slice_a. The second slice in the list (slice_b) will be compared against the first slice. If only a single slice is provided, then slice_a will be compared against "not slice_a". Below are examples with feature "education" with value "low", "medium", "high" in the dataset: Example 1: bias_slices = [{'education': 'low'\}] A single slice provided. In this case, slice_a is the collection of data with 'education' equals 'low', and slice_b is the collection of data with 'education' equals 'medium' or 'high'. Example 2: bias_slices = [{'education': 'low'\}, {'education': 'high'\}] Two slices provided. In this case, slice_a is the collection of data with 'education' equals 'low', and slice_b is the collection of data with 'education' equals 'high'. + * Processed baseline datasets. */ - biasSlices?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpec; + baselineDatasets?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetailProcessedDataset[]; /** - * Positive labels selection on the target field. + * Additional job error status. */ - labels?: string[] | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationModelEvaluationExplanationSpec { + error?: Schema$GoogleRpcStatus; /** - * Explanation spec details. + * Status of data processing for each monitoring objective. Key is the objective. 
*/ - explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; + objectiveStatus?: {[key: string]: Schema$GoogleRpcStatus} | null; /** - * Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai` + * Processed target datasets. */ - explanationType?: string | null; + targetDatasets?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetailProcessedDataset[]; } /** - * A collection of metrics calculated by comparing Model's predictions on a slice of the test data against ground truth annotations. + * Processed dataset information. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSlice { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetailProcessedDataset { /** - * Output only. Timestamp when this ModelEvaluationSlice was created. + * Actual data location of the processed dataset. */ - createTime?: string | null; + location?: string | null; /** - * Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri + * Dataset time range information if any. */ - metrics?: any | null; + timeRange?: Schema$GoogleTypeInterval; + } + /** + * Notification spec(email, notification channel) for model monitoring statistics/alerts. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpec { /** - * Output only. Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). + * Email alert config. */ - metricsSchemaUri?: string | null; + emailConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecEmailConfig; /** - * Output only. Aggregated explanation metrics for the Model's prediction output over the data this ModelEvaluation uses. 
This field is populated only if the Model is evaluated with explanations, and only for tabular Models. + * Dump the anomalies to Cloud Logging. The anomalies will be put to json payload encoded from proto google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry. This can be further sinked to Pub/Sub or any other services supported by Cloud Logging. */ - modelExplanation?: Schema$GoogleCloudAiplatformV1beta1ModelExplanation; - /** - * Output only. The resource name of the ModelEvaluationSlice. - */ - name?: string | null; + enableCloudLogging?: boolean | null; /** - * Output only. The slice of the test data that is used to evaluate the Model. + * Notification channel config. */ - slice?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSlice; + notificationChannelConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecNotificationChannelConfig[]; } /** - * Definition of a slice. + * The config for email alerts. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSlice { - /** - * Output only. The dimension of the slice. Well-known dimensions are: * `annotationSpec`: This slice is on the test data that has either ground truth or prediction with AnnotationSpec.display_name equals to value. * `slice`: This slice is a user customized slice defined by its SliceSpec. - */ - dimension?: string | null; - /** - * Output only. Specification for how the data was sliced. - */ - sliceSpec?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpec; + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecEmailConfig { /** - * Output only. The value of the dimension in this slice. + * The email addresses to send the alerts. */ - value?: string | null; + userEmails?: string[] | null; } /** - * Specification for how the data should be sliced. + * Google Cloud Notification Channel config. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecNotificationChannelConfig { /** - * Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by "instance" as a dictionary prefix for Vertex Batch Predictions output format. + * Resource names of the NotificationChannels. Must be of the format `projects//notificationChannels/` */ - configs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecSliceConfig; - } | null; + notificationChannel?: string | null; } /** - * A range of values for slice(s). `low` is inclusive, `high` is exclusive. + * The objective configuration for model monitoring, including the information needed to detect anomalies for one particular model. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecRange { - /** - * Exclusive high value for the range. - */ - high?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfig { /** - * Inclusive low value for the range. + * The config for integrating with Vertex Explainable AI. */ - low?: number | null; - } - /** - * Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with feature zip_code with values 12345, 23334, 88888 and feature country with values "US", "Canada", "Mexico" in the dataset: Example 1: { "zip_code": { "value": { "float_value": 12345.0 \} \} \} A single slice for any data with zip_code 12345 in the dataset. 
Example 2: { "zip_code": { "range": { "low": 12345, "high": 20000 \} \} \} A single slice containing data where the zip_codes between 12345 and 20000 For this example, data with the zip_code of 12345 will be in this slice. Example 3: { "zip_code": { "range": { "low": 10000, "high": 20000 \} \}, "country": { "value": { "string_value": "US" \} \} \} A single slice containing data where the zip_codes between 10000 and 20000 has the country "US". For this example, data with the zip_code of 12345 and country "US" will be in this slice. Example 4: { "country": {"all_values": { "value": true \} \} \} Three slices are computed, one for each unique country in the dataset. Example 5: { "country": { "all_values": { "value": true \} \}, "zip_code": { "value": { "float_value": 12345.0 \} \} \} Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country "US" will be in one slice, zip_code 12345 and country "Canada" in another slice, and zip_code 12345 and country "Mexico" in another slice, totaling 3 slices. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecSliceConfig { + explanationConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfig; /** - * If all_values is set to true, then all possible labels of the keyed feature will have another slice computed. Example: `{"all_values":{"value":true\}\}` + * The config for drift of prediction data. */ - allValues?: boolean | null; + predictionDriftDetectionConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig; /** - * A range of values for a numerical feature. Example: `{"range":{"low":10000.0,"high":50000.0\}\}` will capture 12345 and 23334 in the slice. + * Training dataset for models. This field has to be set only if TrainingPredictionSkewDetectionConfig is specified. 
*/ - range?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecRange; + trainingDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingDataset; /** - * A unique specific value for a given feature. Example: `{ "value": { "string_value": "12345" \} \}` + * The config for skew between training data and prediction data. */ - value?: Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecValue; + trainingPredictionSkewDetectionConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig; } /** - * Single value that supports strings and floats. + * The config for integrating with Vertex Explainable AI. Only applicable if the Model has explanation_spec populated. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelEvaluationSliceSliceSliceSpecValue { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfig { /** - * Float type. + * If want to analyze the Vertex Explainable AI feature attribute scores or not. If set to true, Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. */ - floatValue?: number | null; + enableFeatureAttributes?: boolean | null; /** - * String type. + * Predictions generated by the BatchPredictionJob using baseline dataset. */ - stringValue?: string | null; + explanationBaseline?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline; } /** - * Aggregated explanation metrics for a Model over a set of instances. + * Output from BatchPredictionJob for Model Monitoring baseline dataset, which can be used to generate baseline attribution scores. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelExplanation { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline { /** - * Output only. 
Aggregated attributions explaining the Model's prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attibution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated. + * BigQuery location for BatchExplain output. */ - meanAttributions?: Schema$GoogleCloudAiplatformV1beta1Attribution[]; - } - /** - * Represents export format supported by the Model. All formats export to Google Cloud Storage. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelExportFormat { + bigquery?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; /** - * Output only. The content of this Model that may be exported. + * Cloud Storage location for BatchExplain output. */ - exportableContents?: string[] | null; + gcs?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; /** - * Output only. The ID of the export format. The possible format IDs are: * `tflite` Used for Android mobile devices. * `edgetpu-tflite` Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices. * `tf-saved-model` A tensorflow model in SavedModel format. * `tf-js` A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in the browser and in Node.js using JavaScript. * `core-ml` Used for iOS mobile devices. * `custom-trained` A Model that was uploaded or trained by custom code. 
+ * The storage format of the predictions generated BatchPrediction job. */ - id?: string | null; + predictionFormat?: string | null; } /** - * Contains information about the source of the models generated from Model Garden. + * The config for Prediction data drift detection. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelGardenSource { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig { /** - * Required. The model garden source model resource name. + * Key is the feature name and value is the threshold. The threshold here is against attribution score distance between different time windows. */ - publicModelName?: string | null; - } - /** - * Vertex AI Model Monitoring Service serves as a central hub for the analysis and visualization of data quality and performance related to models. ModelMonitor stands as a top level resource for overseeing your model monitoring tasks. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitor { + attributionScoreDriftThresholds?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + } | null; /** - * Output only. Timestamp when this ModelMonitor was created. + * Drift anomaly detection threshold used by all features. When the per-feature thresholds are not set, this field can be used to specify a threshold for all features. */ - createTime?: string | null; + defaultDriftThreshold?: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; /** - * The display name of the ModelMonitor. The name can be up to 128 characters long and can consist of any UTF-8. + * Key is the feature name and value is the threshold. If a feature needs to be monitored for drift, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between different time windws. 
*/ - displayName?: string | null; + driftThresholds?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + } | null; + } + /** + * Training Dataset information. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingDataset { /** - * Optional model explanation spec. It is used for feature attribution monitoring. + * The BigQuery table of the unmanaged Dataset used to train this Model. */ - explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; + bigquerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; /** - * Monitoring Schema is to specify the model's features, prediction outputs and ground truth properties. It is used to extract pertinent data from the dataset and to process features based on their properties. Make sure that the schema aligns with your dataset, if it does not, we will be unable to extract data from the dataset. It is required for most models, but optional for Vertex AI AutoML Tables unless the schem information is not available. + * Data format of the dataset, only applicable if the input is from Google Cloud Storage. The possible formats are: "tf-record" The source file is a TFRecord file. "csv" The source file is a CSV file. "jsonl" The source file is a JSONL file. */ - modelMonitoringSchema?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchema; + dataFormat?: string | null; /** - * The entity that is subject to analysis. Currently only models in Vertex AI Model Registry are supported. If you want to analyze the model which is outside the Vertex AI, you could register a model in Vertex AI Model Registry using just a display name. + * The resource name of the Dataset used to train this Model. */ - modelMonitoringTarget?: Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTarget; + dataset?: string | null; /** - * Immutable. Resource name of the ModelMonitor. Format: `projects/{project\}/locations/{location\}/modelMonitors/{model_monitor\}`. 
+ * The Google Cloud Storage uri of the unmanaged Dataset used to train this Model. */ - name?: string | null; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; /** - * Optional default notification spec, it can be overridden in the ModelMonitoringJob notification spec. + * Strategy to sample data from Training Dataset. If not set, we process the whole dataset. */ - notificationSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpec; + loggingSamplingStrategy?: Schema$GoogleCloudAiplatformV1beta1SamplingStrategy; /** - * Optional default monitoring metrics/logs export spec, it can be overridden in the ModelMonitoringJob output spec. If not specified, a default Google Cloud Storage bucket will be created under your project. + * The target field name the model is to predict. This field will be excluded when doing Predict and (or) Explain for the training data. */ - outputSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringOutputSpec; + targetField?: string | null; + } + /** + * The config for Training & Prediction data skew detection. It specifies the training dataset sources and the skew detection parameters. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig { /** - * Optional default tabular model monitoring objective. + * Key is the feature name and value is the threshold. The threshold here is against attribution score distance between the training and prediction feature. */ - tabularObjective?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecTabularObjective; + attributionScoreSkewThresholds?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + } | null; /** - * Optional training dataset used to train the model. It can serve as a reference dataset to identify changes in production. + * Skew anomaly detection threshold used by all features. 
When the per-feature thresholds are not set, this field can be used to specify a threshold for all features. */ - trainingDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput; + defaultSkewThreshold?: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; /** - * Output only. Timestamp when this ModelMonitor was updated most recently. + * Key is the feature name and value is the threshold. If a feature needs to be monitored for skew, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between the training and prediction feature. */ - updateTime?: string | null; + skewThresholds?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + } | null; } /** - * Represents a single monitoring alert. This is currently used in the SearchModelMonitoringAlerts api, thus the alert wrapped in this message belongs to the resource asked in the request. + * Monitoring objectives spec. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlert { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpec { /** - * Alert creation time. + * Baseline dataset. It could be the training dataset or production serving dataset from a previous period. */ - alertTime?: string | null; + baselineDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput; /** - * Anomaly details. + * The explanation spec. This spec is required when the objectives spec includes feature attribution objectives. */ - anomaly?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomaly; + explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; /** - * One of the supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` + * Tabular monitoring objective. */ - objectiveType?: string | null; + tabularObjective?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecTabularObjective; /** - * The stats name. 
+ * Target dataset. */ - statsName?: string | null; + targetDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput; } /** - * Monitoring alert triggered condition. + * Data drift monitoring spec. Data drift measures the distribution distance between the current dataset and a baseline dataset. A typical use case is to detect data drift between the recent production serving dataset and the training dataset, or to compare the recent production dataset with a dataset from a previous period. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecDataDriftSpec { /** - * A condition that compares a stats value against a threshold. Alert will be triggered if value above the threshold. + * Supported metrics type: * l_infinity * jensen_shannon_divergence */ - threshold?: number | null; - } - /** - * The alert config for model monitoring. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfig { + categoricalMetricType?: string | null; /** - * Email alert config. + * Default alert condition for all the categorical features. */ - emailAlertConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfigEmailAlertConfig; + defaultCategoricalAlertCondition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; /** - * Dump the anomalies to Cloud Logging. The anomalies will be put to json payload encoded from proto google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry. This can be further sinked to Pub/Sub or any other services supported by Cloud Logging. + * Default alert condition for all the numeric features. */ - enableLogging?: boolean | null; + defaultNumericAlertCondition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; /** - * Resource names of the NotificationChannels to send alert. 
Must be of the format `projects//notificationChannels/` + * Per feature alert condition will override default alert condition. */ - notificationChannels?: string[] | null; - } - /** - * The config for email alert. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfigEmailAlertConfig { + featureAlertConditions?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; + } | null; /** - * The email addresses to send the alert. + * Feature names / Prediction output names interested in monitoring. These should be a subset of the input feature names or prediction output names specified in the monitoring schema. If the field is not specified all features / prediction outputs outlied in the monitoring schema will be used. */ - userEmails?: string[] | null; + features?: string[] | null; + /** + * Supported metrics type: * jensen_shannon_divergence + */ + numericMetricType?: string | null; } /** - * Represents a single model monitoring anomaly. + * Feature attribution monitoring spec. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomaly { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecFeatureAttributionSpec { /** - * Algorithm used to calculated the metrics, eg: jensen_shannon_divergence, l_infinity. + * The config of resources used by the Model Monitoring during the batch explanation for non-AutoML models. If not set, `n1-standard-2` machine type will be used by default. */ - algorithm?: string | null; + batchExplanationDedicatedResources?: Schema$GoogleCloudAiplatformV1beta1BatchDedicatedResources; /** - * Model monitoring job resource name. + * Default alert condition for all the features. */ - modelMonitoringJob?: string | null; + defaultAlertCondition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; /** - * Tabular anomaly. + * Per feature alert condition will override default alert condition. 
*/ - tabularAnomaly?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomalyTabularAnomaly; - } - /** - * Tabular anomaly details. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAnomalyTabularAnomaly { - /** - * Anomaly body. - */ - anomaly?: any | null; + featureAlertConditions?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; + } | null; /** - * Additional anomaly information. e.g. Google Cloud Storage uri. + * Feature names interested in monitoring. These should be a subset of the input feature names specified in the monitoring schema. If the field is not specified all features outlied in the monitoring schema will be used. */ - anomalyUri?: string | null; + features?: string[] | null; + } + /** + * Tabular monitoring objective. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecTabularObjective { /** - * The alert condition associated with this anomaly. + * Feature attribution monitoring spec. */ - condition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; + featureAttributionSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecFeatureAttributionSpec; /** - * Overview of this anomaly. + * Input feature distribution drift monitoring spec. */ - summary?: string | null; + featureDriftSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecDataDriftSpec; /** - * The time the anomaly was triggered. + * Prediction output distribution drift monitoring spec. */ - triggerTime?: string | null; + predictionOutputDriftSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecDataDriftSpec; } /** - * The model monitoring configuration used for Batch Prediction Job. + * Specification for the export destination of monitoring results, including metrics, logs, etc. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringOutputSpec { /** - * Model monitoring alert config. + * Google Cloud Storage base folder path for metrics, error logs, etc. */ - alertConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertConfig; + gcsBaseDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + } + /** + * The Model Monitoring Schema definition. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchema { /** - * YAML schema file uri in Cloud Storage describing the format of a single instance that you want Tensorflow Data Validation (TFDV) to analyze. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. + * Feature names of the model. Vertex AI will try to match the features from your dataset as follows: * For 'csv' files, the header names are required, and we will extract the corresponding feature values when the header names align with the feature names. * For 'jsonl' files, we will extract the corresponding feature values if the key names match the feature names. Note: Nested features are not supported, so please ensure your features are flattened. Ensure the feature values are scalar or an array of scalars. * For 'bigquery' dataset, we will extract the corresponding feature values if the column names match the feature names. Note: The column type can be a scalar or an array of scalars. STRUCT or JSON types are not supported. You may use SQL queries to select or aggregate the relevant features from your original table. However, ensure that the 'schema' of the query results meets our requirements. * For the Vertex AI Endpoint Request Response Logging table or Vertex AI Batch Prediction Job results. 
If the instance_type is an array, ensure that the sequence in feature_fields matches the order of features in the prediction instance. We will match the feature with the array in the order specified in [feature_fields]. */ - analysisInstanceSchemaUri?: string | null; + featureFields?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema[]; /** - * Model monitoring objective config. + * Target /ground truth names of the model. */ - objectiveConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfig[]; + groundTruthFields?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema[]; /** - * A Google Cloud Storage location for batch prediction model monitoring to dump statistics and anomalies. If not provided, a folder will be created in customer project to hold statistics and anomalies. + * Prediction output names of the model. The requirements are the same as the feature_fields. For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column\}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed. */ - statsAnomaliesBaseDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + predictionFields?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema[]; } /** - * Model monitoring data input spec. + * Schema field definition. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema { /** - * Vertex AI Batch prediction Job. 
+ * Supported data types are: `float` `integer` `boolean` `string` `categorical` */ - batchPredictionOutput?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputBatchPredictionOutput; + dataType?: string | null; /** - * Columnized dataset. + * Field name. */ - columnizedDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDataset; + name?: string | null; /** - * The time interval (pair of start_time and end_time) for which results should be returned. + * Describes if the schema field is an array of given data type. */ - timeInterval?: Schema$GoogleTypeInterval; + repeated?: boolean | null; + } + /** + * Monitoring monitoring job spec. It outlines the specifications for monitoring objectives, notifications, and result exports. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSpec { /** - * The time offset setting for which results should be returned. + * The model monitoring notification spec. */ - timeOffset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputTimeOffset; + notificationSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpec; /** - * Vertex AI Endpoint request & response logging. + * The monitoring objective spec. */ - vertexEndpointLogs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputVertexEndpointLogs; + objectiveSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpec; + /** + * The Output destination spec for metrics, error logs, etc. + */ + outputSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringOutputSpec; } /** - * Data from Vertex AI Batch prediction job output. + * Represents the collection of statistics for a metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputBatchPredictionOutput { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStats { /** - * Vertex AI Batch prediction job resource name. 
The job must match the model version specified in [ModelMonitor].[model_monitoring_target]. + * Generated tabular statistics. */ - batchPredictionJob?: string | null; + tabularStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringTabularStats; } /** - * Input dataset spec. + * Statistics and anomalies generated by Model Monitoring. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDataset { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomalies { /** - * BigQuery data source. + * Number of anomalies within all stats. */ - bigquerySource?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringBigQuerySource; + anomalyCount?: number | null; /** - * Google Cloud Storage data source. + * Deployed Model ID. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringGcsSource; + deployedModelId?: string | null; /** - * The timestamp field. Usually for serving data. + * A list of historical Stats and Anomalies generated for all Features. */ - timestampField?: string | null; + featureStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies[]; /** - * Resource name of the Vertex AI managed dataset. + * Model Monitoring Objective those stats and anomalies belonging to. */ - vertexDataset?: string | null; + objective?: string | null; } /** - * Dataset spec for data sotred in BigQuery. + * Historical Stats (and Anomalies) for a specific Feature. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringBigQuerySource { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { /** - * Standard SQL to be used instead of the `table_uri`. + * Display Name of the Feature. 
*/ - query?: string | null; + featureDisplayName?: string | null; /** - * BigQuery URI to a table, up to 2000 characters long. All the columns in the table will be selected. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`. + * A list of historical stats generated by different time window's Prediction Dataset. */ - tableUri?: string | null; - } - /** - * Dataset spec for data stored in Google Cloud Storage. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputModelMonitoringDatasetModelMonitoringGcsSource { + predictionStats?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly[]; /** - * Data format of the dataset. + * Threshold for anomaly detection. */ - format?: string | null; + threshold?: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; /** - * Google Cloud Storage URI to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + * Stats calculated for the Training Dataset. */ - gcsUri?: string | null; + trainingStats?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly; } /** - * Time offset setting. + * Represents a single statistics data point. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputTimeOffset { - /** - * [offset] is the time difference from the cut-off time. For scheduled jobs, the cut-off time is the scheduled time. For non-scheduled jobs, it's the time when the job was created. Currently we support the following format: 'w|W': Week, 'd|D': Day, 'h|H': Hour E.g. '1h' stands for 1 hour, '2d' stands for 2 days. - */ - offset?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPoint { /** - * [window] refers to the scope of data selected for analysis. It allows you to specify the quantity of data you wish to examine. Currently we support the following format: 'w|W': Week, 'd|D': Day, 'h|H': Hour E.g. 
'1h' stands for 1 hour, '2d' stands for 2 days. + * Algorithm used to calculated the metrics, eg: jensen_shannon_divergence, l_infinity. */ - window?: string | null; - } - /** - * Data from Vertex AI Endpoint request response logging. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInputVertexEndpointLogs { + algorithm?: string | null; /** - * List of endpoint resource names. The endpoints must enable the logging with the [Endpoint].[request_response_logging_config], and must contain the deployed model corresponding to the model version specified in [ModelMonitor].[model_monitoring_target]. + * Statistics from baseline dataset. */ - endpoints?: string[] | null; - } - /** - * Represents a model monitoring job that analyze dataset using different monitoring algorithm. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJob { + baselineStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValue; /** - * Output only. Timestamp when this ModelMonitoringJob was created. + * Statistics create time. */ createTime?: string | null; /** - * The display name of the ModelMonitoringJob. The name can be up to 128 characters long and can consist of any UTF-8. - */ - displayName?: string | null; - /** - * Output only. Execution results for all the monitoring objectives. + * Statistics from current dataset. */ - jobExecutionDetail?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetail; + currentStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValue; /** - * Monitoring monitoring job spec. It outlines the specifications for monitoring objectives, notifications, and result exports. If left blank, the default monitoring specifications from the top-level resource 'ModelMonitor' will be applied. If provided, we will use the specification defined here rather than the default one. + * Indicate if the statistics has anomaly. 
*/ - modelMonitoringSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSpec; + hasAnomaly?: boolean | null; /** - * Output only. Resource name of a ModelMonitoringJob. Format: `projects/{project_id\}/locations/{location_id\}/modelMonitors/{model_monitor_id\}/modelMonitoringJobs/{model_monitoring_job_id\}` + * Model monitoring job resource name. */ - name?: string | null; + modelMonitoringJob?: string | null; /** - * Output only. Schedule resource name. It will only appear when this job is triggered by a schedule. + * Schedule resource name. */ schedule?: string | null; /** - * Output only. Timestamp when this ModelMonitoringJob was scheduled. It will only appear when this job is triggered by a schedule. - */ - scheduleTime?: string | null; - /** - * Output only. The state of the monitoring job. * When the job is still creating, the state will be 'JOB_STATE_PENDING'. * Once the job is successfully created, the state will be 'JOB_STATE_RUNNING'. * Once the job is finished, the state will be one of 'JOB_STATE_FAILED', 'JOB_STATE_SUCCEEDED', 'JOB_STATE_PARTIALLY_SUCCEEDED'. - */ - state?: string | null; - /** - * Output only. Timestamp when this ModelMonitoringJob was updated most recently. + * Threshold value. */ - updateTime?: string | null; + thresholdValue?: number | null; } /** - * Represent the execution details of the job. + * Typed value of the statistics. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetail { - /** - * Processed baseline datasets. - */ - baselineDatasets?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetailProcessedDataset[]; - /** - * Additional job error status. - */ - error?: Schema$GoogleRpcStatus; + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValue { /** - * Status of data processing for each monitoring objective. Key is the objective. + * Distribution. 
*/ - objectiveStatus?: {[key: string]: Schema$GoogleRpcStatus} | null; + distributionValue?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValueDistributionDataValue; /** - * Processed target datasets. + * Double. */ - targetDatasets?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetailProcessedDataset[]; + doubleValue?: number | null; } /** - * Processed dataset information. + * Summary statistics for a population of values. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringJobExecutionDetailProcessedDataset { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValueDistributionDataValue { /** - * Actual data location of the processed dataset. + * tensorflow.metadata.v0.DatasetFeatureStatistics format. */ - location?: string | null; + distribution?: any | null; /** - * Dataset time range information if any. + * Distribution distance deviation from the current dataset's statistics to baseline dataset's statistics. * For categorical feature, the distribution distance is calculated by L-inifinity norm or Jensen–Shannon divergence. * For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. */ - timeRange?: Schema$GoogleTypeInterval; + distributionDeviation?: number | null; } /** - * Notification spec(email, notification channel) for model monitoring statistics/alerts. + * A collection of data points that describes the time-varying values of a tabular metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringTabularStats { /** - * Email alert config. + * The data points of this time series. When listing time series, points are returned in reverse time order. 
*/ - emailConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecEmailConfig; + dataPoints?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPoint[]; /** - * Dump the anomalies to Cloud Logging. The anomalies will be put to json payload encoded from proto google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry. This can be further sinked to Pub/Sub or any other services supported by Cloud Logging. + * One of the supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` */ - enableCloudLogging?: boolean | null; + objectiveType?: string | null; /** - * Notification channel config. + * The stats name. */ - notificationChannelConfigs?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecNotificationChannelConfig[]; + statsName?: string | null; } /** - * The config for email alerts. + * The monitoring target refers to the entity that is subject to analysis. e.g. Vertex AI Model version. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecEmailConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTarget { /** - * The email addresses to send the alerts. + * Model in Vertex AI Model Registry. */ - userEmails?: string[] | null; + vertexModel?: Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTargetVertexModelSource; } /** - * Google Cloud Notification Channel config. + * Model in Vertex AI Model Registry. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpecNotificationChannelConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTargetVertexModelSource { /** - * Resource names of the NotificationChannels. Must be of the format `projects//notificationChannels/` + * Model resource name. Format: projects/{project\}/locations/{location\}/models/{model\}. 
*/ - notificationChannel?: string | null; + model?: string | null; + /** + * Model version id. + */ + modelVersionId?: string | null; } /** - * The objective configuration for model monitoring, including the information needed to detect anomalies for one particular model. + * Contains information about the original Model if this Model is a copy. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfig { - /** - * The config for integrating with Vertex Explainable AI. - */ - explanationConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfig; + export interface Schema$GoogleCloudAiplatformV1beta1ModelOriginalModelInfo { /** - * The config for drift of prediction data. + * Output only. The resource name of the Model this Model is a copy of, including the revision. Format: `projects/{project\}/locations/{location\}/models/{model_id\}@{version_id\}` */ - predictionDriftDetectionConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig; + model?: string | null; + } + /** + * Detail description of the source information of the model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ModelSourceInfo { /** - * Training dataset for models. This field has to be set only if TrainingPredictionSkewDetectionConfig is specified. + * If this Model is copy of another Model. If true then source_type pertains to the original. */ - trainingDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingDataset; + copy?: boolean | null; /** - * The config for skew between training data and prediction data. + * Type of the model source. */ - trainingPredictionSkewDetectionConfig?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig; + sourceType?: string | null; } /** - * The config for integrating with Vertex Explainable AI. Only applicable if the Model has explanation_spec populated. 
+ * Runtime operation information for IndexEndpointService.MutateDeployedIndex. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfig { + export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedIndexOperationMetadata { /** - * If want to analyze the Vertex Explainable AI feature attribute scores or not. If set to true, Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. + * The unique index id specified by user */ - enableFeatureAttributes?: boolean | null; + deployedIndexId?: string | null; /** - * Predictions generated by the BatchPredictionJob using baseline dataset. + * The operation generic information. */ - explanationBaseline?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Output from BatchPredictionJob for Model Monitoring baseline dataset, which can be used to generate baseline attribution scores. + * Response message for IndexEndpointService.MutateDeployedIndex. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline { - /** - * BigQuery location for BatchExplain output. - */ - bigquery?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; + export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedIndexResponse { /** - * Cloud Storage location for BatchExplain output. + * The DeployedIndex that had been updated in the IndexEndpoint. */ - gcs?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + deployedIndex?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex; + } + /** + * Runtime operation information for EndpointService.MutateDeployedModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedModelOperationMetadata { /** - * The storage format of the predictions generated BatchPrediction job. 
+ * The operation generic information. */ - predictionFormat?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * The config for Prediction data drift detection. + * Request message for EndpointService.MutateDeployedModel. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig { + export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedModelRequest { /** - * Key is the feature name and value is the threshold. The threshold here is against attribution score distance between different time windows. + * Required. The DeployedModel to be mutated within the Endpoint. Only the following fields can be mutated: * `min_replica_count` in either DedicatedResources or AutomaticResources * `max_replica_count` in either DedicatedResources or AutomaticResources * autoscaling_metric_specs * `disable_container_logging` (v1 only) * `enable_container_logging` (v1beta1 only) */ - attributionScoreDriftThresholds?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; - } | null; + deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; /** - * Drift anomaly detection threshold used by all features. When the per-feature thresholds are not set, this field can be used to specify a threshold for all features. + * Required. The update mask applies to the resource. See google.protobuf.FieldMask. */ - defaultDriftThreshold?: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + updateMask?: string | null; + } + /** + * Response message for EndpointService.MutateDeployedModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedModelResponse { /** - * Key is the feature name and value is the threshold. If a feature needs to be monitored for drift, a value threshold must be configured for that feature. The threshold here is against feature distribution distance between different time windws. 
+ * The DeployedModel that's being mutated. */ - driftThresholds?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; - } | null; + deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; } /** - * Training Dataset information. + * Represents a Neural Architecture Search (NAS) job. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingDataset { + export interface Schema$GoogleCloudAiplatformV1beta1NasJob { /** - * The BigQuery table of the unmanaged Dataset used to train this Model. + * Output only. Time when the NasJob was created. */ - bigquerySource?: Schema$GoogleCloudAiplatformV1beta1BigQuerySource; + createTime?: string | null; /** - * Data format of the dataset, only applicable if the input is from Google Cloud Storage. The possible formats are: "tf-record" The source file is a TFRecord file. "csv" The source file is a CSV file. "jsonl" The source file is a JSONL file. + * Required. The display name of the NasJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - dataFormat?: string | null; + displayName?: string | null; /** - * The resource name of the Dataset used to train this Model. + * Optional. Enable a separation of Custom model training and restricted image training for tenant project. */ - dataset?: string | null; + enableRestrictedImageTraining?: boolean | null; /** - * The Google Cloud Storage uri of the unmanaged Dataset used to train this Model. + * Customer-managed encryption key options for a NasJob. If this is set, then all resources created by the NasJob will be encrypted with the provided encryption key. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Strategy to sample data from Training Dataset. If not set, we process the whole dataset. + * Output only. 
Time when the NasJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ - loggingSamplingStrategy?: Schema$GoogleCloudAiplatformV1beta1SamplingStrategy; + endTime?: string | null; /** - * The target field name the model is to predict. This field will be excluded when doing Predict and (or) Explain for the training data. + * Output only. Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. */ - targetField?: string | null; - } - /** - * The config for Training & Prediction data skew detection. It specifies the training dataset sources and the skew detection parameters. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig { + error?: Schema$GoogleRpcStatus; /** - * Key is the feature name and value is the threshold. The threshold here is against attribution score distance between the training and prediction feature. + * The labels with user-defined metadata to organize NasJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - attributionScoreSkewThresholds?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; - } | null; + labels?: {[key: string]: string} | null; /** - * Skew anomaly detection threshold used by all features. When the per-feature thresholds are not set, this field can be used to specify a threshold for all features. + * Output only. Resource name of the NasJob. */ - defaultSkewThreshold?: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + name?: string | null; /** - * Key is the feature name and value is the threshold. If a feature needs to be monitored for skew, a value threshold must be configured for that feature. 
The threshold here is against feature distribution distance between the training and prediction feature. + * Output only. Output of the NasJob. */ - skewThresholds?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; - } | null; - } - /** - * Monitoring objectives spec. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpec { + nasJobOutput?: Schema$GoogleCloudAiplatformV1beta1NasJobOutput; /** - * Baseline dataset. It could be the training dataset or production serving dataset from a previous period. + * Required. The specification of a NasJob. */ - baselineDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput; + nasJobSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpec; /** - * The explanation spec. This spec is required when the objectives spec includes feature attribution objectives. + * Output only. Time when the NasJob for the first time entered the `JOB_STATE_RUNNING` state. */ - explanationSpec?: Schema$GoogleCloudAiplatformV1beta1ExplanationSpec; + startTime?: string | null; /** - * Tabular monitoring objective. + * Output only. The detailed state of the job. */ - tabularObjective?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecTabularObjective; + state?: string | null; /** - * Target dataset. + * Output only. Time when the NasJob was most recently updated. */ - targetDataset?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringInput; + updateTime?: string | null; } /** - * Data drift monitoring spec. Data drift measures the distribution distance between the current dataset and a baseline dataset. A typical use case is to detect data drift between the recent production serving dataset and the training dataset, or to compare the recent production dataset with a dataset from a previous period. + * Represents a uCAIP NasJob output. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecDataDriftSpec { + export interface Schema$GoogleCloudAiplatformV1beta1NasJobOutput { /** - * Supported metrics type: * l_infinity * jensen_shannon_divergence + * Output only. The output of this multi-trial Neural Architecture Search (NAS) job. */ - categoricalMetricType?: string | null; + multiTrialJobOutput?: Schema$GoogleCloudAiplatformV1beta1NasJobOutputMultiTrialJobOutput; + } + /** + * The output of a multi-trial Neural Architecture Search (NAS) jobs. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NasJobOutputMultiTrialJobOutput { /** - * Default alert condition for all the categorical features. + * Output only. List of NasTrials that were started as part of search stage. */ - defaultCategoricalAlertCondition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; + searchTrials?: Schema$GoogleCloudAiplatformV1beta1NasTrial[]; /** - * Default alert condition for all the numeric features. + * Output only. List of NasTrials that were started as part of train stage. */ - defaultNumericAlertCondition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; + trainTrials?: Schema$GoogleCloudAiplatformV1beta1NasTrial[]; + } + /** + * Represents the spec of a NasJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpec { /** - * Per feature alert condition will override default alert condition. + * The spec of multi-trial algorithms. */ - featureAlertConditions?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; - } | null; + multiTrialAlgorithmSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpec; /** - * Feature names / Prediction output names interested in monitoring. These should be a subset of the input feature names or prediction output names specified in the monitoring schema. 
If the field is not specified all features / prediction outputs outlied in the monitoring schema will be used. + * The ID of the existing NasJob in the same Project and Location which will be used to resume search. search_space_spec and nas_algorithm_spec are obtained from previous NasJob hence should not provide them again for this NasJob. */ - features?: string[] | null; + resumeNasJobId?: string | null; /** - * Supported metrics type: * jensen_shannon_divergence + * It defines the search space for Neural Architecture Search (NAS). */ - numericMetricType?: string | null; + searchSpaceSpec?: string | null; } /** - * Feature attribution monitoring spec. + * The spec of multi-trial Neural Architecture Search (NAS). */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecFeatureAttributionSpec { + export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpec { /** - * The config of resources used by the Model Monitoring during the batch explanation for non-AutoML models. If not set, `n1-standard-2` machine type will be used by default. + * Metric specs for the NAS job. Validation for this field is done at `multi_trial_algorithm_spec` field. */ - batchExplanationDedicatedResources?: Schema$GoogleCloudAiplatformV1beta1BatchDedicatedResources; + metric?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecMetricSpec; /** - * Default alert condition for all the features. + * The multi-trial Neural Architecture Search (NAS) algorithm type. Defaults to `REINFORCEMENT_LEARNING`. */ - defaultAlertCondition?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; + multiTrialAlgorithm?: string | null; /** - * Per feature alert condition will override default alert condition. + * Required. Spec for search trials. 
*/ - featureAlertConditions?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlertCondition; - } | null; + searchTrialSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec; /** - * Feature names interested in monitoring. These should be a subset of the input feature names specified in the monitoring schema. If the field is not specified all features outlied in the monitoring schema will be used. + * Spec for train trials. Top N [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for every M [TrainTrialSpec.frequency] trials searched. */ - features?: string[] | null; + trainTrialSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec; } /** - * Tabular monitoring objective. + * Represents a metric to optimize. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecTabularObjective { - /** - * Feature attribution monitoring spec. - */ - featureAttributionSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecFeatureAttributionSpec; + export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecMetricSpec { /** - * Input feature distribution drift monitoring spec. + * Required. The optimization goal of the metric. */ - featureDriftSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecDataDriftSpec; + goal?: string | null; /** - * Prediction output distribution drift monitoring spec. + * Required. The ID of the metric. Must not contain whitespaces. */ - predictionOutputDriftSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpecDataDriftSpec; + metricId?: string | null; } /** - * Specification for the export destination of monitoring results, including metrics, logs, etc. + * Represent spec for search trials. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringOutputSpec { + export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { /** - * Google Cloud Storage base folder path for metrics, error logs, etc. + * The number of failed trials that need to be seen before failing the NasJob. If set to 0, Vertex AI decides how many trials must fail before the whole job fails. */ - gcsBaseDirectory?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; - } - /** - * The Model Monitoring Schema definition. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchema { - /** - * Feature names of the model. Vertex AI will try to match the features from your dataset as follows: * For 'csv' files, the header names are required, and we will extract the corresponding feature values when the header names align with the feature names. * For 'jsonl' files, we will extract the corresponding feature values if the key names match the feature names. Note: Nested features are not supported, so please ensure your features are flattened. Ensure the feature values are scalar or an array of scalars. * For 'bigquery' dataset, we will extract the corresponding feature values if the column names match the feature names. Note: The column type can be a scalar or an array of scalars. STRUCT or JSON types are not supported. You may use SQL queries to select or aggregate the relevant features from your original table. However, ensure that the 'schema' of the query results meets our requirements. * For the Vertex AI Endpoint Request Response Logging table or Vertex AI Batch Prediction Job results. If the instance_type is an array, ensure that the sequence in feature_fields matches the order of features in the prediction instance. We will match the feature with the array in the order specified in [feature_fields]. 
- */ - featureFields?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema[]; - /** - * Target /ground truth names of the model. - */ - groundTruthFields?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema[]; - /** - * Prediction output names of the model. The requirements are the same as the feature_fields. For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column\}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed. - */ - predictionFields?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema[]; - } - /** - * Schema field definition. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSchemaFieldSchema { + maxFailedTrialCount?: number | null; /** - * Supported data types are: `float` `integer` `boolean` `string` `categorical` + * Required. The maximum number of trials to run in parallel. */ - dataType?: string | null; + maxParallelTrialCount?: number | null; /** - * Field name. + * Required. The maximum number of Neural Architecture Search (NAS) trials to run. */ - name?: string | null; + maxTrialCount?: number | null; /** - * Describes if the schema field is an array of given data type. + * Required. The spec of a search trial job. The same spec applies to all search trials. */ - repeated?: boolean | null; + searchTrialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; } /** - * Monitoring monitoring job spec. It outlines the specifications for monitoring objectives, notifications, and result exports. + * Represent spec for train trials. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringSpec { + export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { /** - * The model monitoring notification spec. + * Required. Frequency of search trials to start train stage. Top N [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for every M [TrainTrialSpec.frequency] trials searched. */ - notificationSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringNotificationSpec; + frequency?: number | null; /** - * The monitoring objective spec. + * Required. The maximum number of trials to run in parallel. */ - objectiveSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringObjectiveSpec; + maxParallelTrialCount?: number | null; /** - * The Output destination spec for metrics, error logs, etc. + * Required. The spec of a train trial job. The same spec applies to all train trials. */ - outputSpec?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringOutputSpec; + trainTrialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; } /** - * Represents the collection of statistics for a metric. + * Represents a uCAIP NasJob trial. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStats { + export interface Schema$GoogleCloudAiplatformV1beta1NasTrial { /** - * Generated tabular statistics. + * Output only. Time when the NasTrial's status changed to `SUCCEEDED` or `INFEASIBLE`. */ - tabularStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringTabularStats; - } - /** - * Statistics and anomalies generated by Model Monitoring. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomalies { + endTime?: string | null; /** - * Number of anomalies within all stats. + * Output only. The final measurement containing the objective value. */ - anomalyCount?: number | null; + finalMeasurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; /** - * Deployed Model ID. + * Output only. 
The identifier of the NasTrial assigned by the service. */ - deployedModelId?: string | null; + id?: string | null; /** - * A list of historical Stats and Anomalies generated for all Features. + * Output only. Time when the NasTrial was started. */ - featureStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies[]; + startTime?: string | null; /** - * Model Monitoring Objective those stats and anomalies belonging to. + * Output only. The detailed state of the NasTrial. */ - objective?: string | null; + state?: string | null; } /** - * Historical Stats (and Anomalies) for a specific Feature. + * Represents a NasTrial details along with its parameters. If there is a corresponding train NasTrial, the train NasTrial is also returned. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { + export interface Schema$GoogleCloudAiplatformV1beta1NasTrialDetail { /** - * Display Name of the Feature. + * Output only. Resource name of the NasTrialDetail. */ - featureDisplayName?: string | null; + name?: string | null; /** - * A list of historical stats generated by different time window's Prediction Dataset. + * The parameters for the NasJob NasTrial. */ - predictionStats?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly[]; + parameters?: string | null; /** - * Threshold for anomaly detection. + * The requested search NasTrial. */ - threshold?: Schema$GoogleCloudAiplatformV1beta1ThresholdConfig; + searchTrial?: Schema$GoogleCloudAiplatformV1beta1NasTrial; /** - * Stats calculated for the Training Dataset. + * The train NasTrial corresponding to search_trial. Only populated if search_trial is used for training. */ - trainingStats?: Schema$GoogleCloudAiplatformV1beta1FeatureStatsAnomaly; + trainTrial?: Schema$GoogleCloudAiplatformV1beta1NasTrial; } /** - * Represents a single statistics data point. + * A query to find a number of similar entities. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPoint { - /** - * Algorithm used to calculated the metrics, eg: jensen_shannon_divergence, l_infinity. - */ - algorithm?: string | null; - /** - * Statistics from baseline dataset. - */ - baselineStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValue; + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQuery { /** - * Statistics create time. + * Optional. The embedding vector that be used for similar search. */ - createTime?: string | null; + embedding?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryEmbedding; /** - * Statistics from current dataset. + * Optional. The entity id whose similar entities should be searched for. If embedding is set, search will use embedding instead of entity_id. */ - currentStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValue; + entityId?: string | null; /** - * Indicate if the statistics has anomaly. + * Optional. The number of similar entities to be retrieved from feature view for each query. */ - hasAnomaly?: boolean | null; + neighborCount?: number | null; /** - * Model monitoring job resource name. + * Optional. Parameters that can be set to tune query on the fly. */ - modelMonitoringJob?: string | null; + parameters?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryParameters; /** - * Schedule resource name. + * Optional. Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than sper_crowding_attribute_neighbor_count of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. */ - schedule?: string | null; + perCrowdingAttributeNeighborCount?: number | null; /** - * Threshold value. + * Optional. The list of string filters. 
*/ - thresholdValue?: number | null; + stringFilters?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryStringFilter[]; } /** - * Typed value of the statistics. + * The embedding vector. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValue { - /** - * Distribution. - */ - distributionValue?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValueDistributionDataValue; + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryEmbedding { /** - * Double. + * Optional. Individual value in the embedding. */ - doubleValue?: number | null; + value?: number[] | null; } /** - * Summary statistics for a population of values. + * Parameters that can be overrided in each query to tune query latency and recall. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPointTypedValueDistributionDataValue { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryParameters { /** - * tensorflow.metadata.v0.DatasetFeatureStatistics format. + * Optional. The number of neighbors to find via approximate search before exact reordering is performed; if set, this value must be \> neighbor_count. */ - distribution?: any | null; + approximateNeighborCandidates?: number | null; /** - * Distribution distance deviation from the current dataset's statistics to baseline dataset's statistics. * For categorical feature, the distribution distance is calculated by L-inifinity norm or Jensen–Shannon divergence. * For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. + * Optional. The fraction of the number of leaves to search, set at query time allows user to tune search performance. This value increase result in both search accuracy and latency increase. The value should be between 0.0 and 1.0. 
*/ - distributionDeviation?: number | null; + leafNodesSearchFraction?: number | null; } /** - * A collection of data points that describes the time-varying values of a tabular metric. + * String filter is used to search a subset of the entities by using boolean rules on string columns. For example: if a query specifies string filter with 'name = color, allow_tokens = {red, blue\}, deny_tokens = {purple\}',' then that query will match entities that are red or blue, but if those points are also purple, then they will be excluded even if they are red/blue. Only string filter is supported for now, numeric filter will be supported in the near future. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitoringTabularStats { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryStringFilter { /** - * The data points of this time series. When listing time series, points are returned in reverse time order. + * Optional. The allowed tokens. */ - dataPoints?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsDataPoint[]; + allowTokens?: string[] | null; /** - * One of the supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` + * Optional. The denied tokens. */ - objectiveType?: string | null; + denyTokens?: string[] | null; /** - * The stats name. + * Required. Column names in BigQuery that used as filters. */ - statsName?: string | null; + name?: string | null; } /** - * The monitoring target refers to the entity that is subject to analysis. e.g. Vertex AI Model version. + * Nearest neighbors for one query. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTarget { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighbors { /** - * Model in Vertex AI Model Registry. + * All its neighbors. 
*/ - vertexModel?: Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTargetVertexModelSource; + neighbors?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborsNeighbor[]; } /** - * Model in Vertex AI Model Registry. + * Runtime operation metadata with regard to Matching Engine Index. */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelMonitorModelMonitoringTargetVertexModelSource { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata { /** - * Model resource name. Format: projects/{project\}/locations/{location\}/models/{model\}. + * The validation stats of the content (per file) to be inserted or updated on the Matching Engine Index resource. Populated if contentsDeltaUri is provided as part of Index.metadata. Please note that, currently for those files that are broken or has unsupported file format, we will not have the stats for those files. */ - model?: string | null; + contentValidationStats?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataContentValidationStats[]; /** - * Model version id. + * The ingested data size in bytes. */ - modelVersionId?: string | null; + dataBytesCount?: string | null; } - /** - * Contains information about the original Model if this Model is a copy. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelOriginalModelInfo { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataContentValidationStats { /** - * Output only. The resource name of the Model this Model is a copy of, including the revision. Format: `projects/{project\}/locations/{location\}/models/{model_id\}@{version_id\}` + * Number of records in this file we skipped due to validate errors. */ - model?: string | null; - } - /** - * Detail description of the source information of the model. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1ModelSourceInfo { + invalidRecordCount?: string | null; /** - * If this Model is copy of another Model. If true then source_type pertains to the original. + * Number of sparse records in this file we skipped due to validate errors. */ - copy?: boolean | null; + invalidSparseRecordCount?: string | null; /** - * Type of the model source. + * The detail information of the partial failures encountered for those invalid records that couldn't be parsed. Up to 50 partial errors will be reported. */ - sourceType?: string | null; - } - /** - * Runtime operation information for IndexEndpointService.MutateDeployedIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedIndexOperationMetadata { + partialErrors?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataRecordError[]; /** - * The unique index id specified by user + * Cloud Storage URI pointing to the original file in user's bucket. */ - deployedIndexId?: string | null; + sourceGcsUri?: string | null; /** - * The operation generic information. + * Number of records in this file that were successfully processed. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Response message for IndexEndpointService.MutateDeployedIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedIndexResponse { + validRecordCount?: string | null; /** - * The DeployedIndex that had been updated in the IndexEndpoint. + * Number of sparse records in this file that were successfully processed. */ - deployedIndex?: Schema$GoogleCloudAiplatformV1beta1DeployedIndex; + validSparseRecordCount?: string | null; } - /** - * Runtime operation information for EndpointService.MutateDeployedModel. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedModelOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataRecordError { /** - * The operation generic information. + * Empty if the embedding id is failed to parse. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for EndpointService.MutateDeployedModel. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedModelRequest { + embeddingId?: string | null; /** - * Required. The DeployedModel to be mutated within the Endpoint. Only the following fields can be mutated: * `min_replica_count` in either DedicatedResources or AutomaticResources * `max_replica_count` in either DedicatedResources or AutomaticResources * autoscaling_metric_specs * `disable_container_logging` (v1 only) * `enable_container_logging` (v1beta1 only) + * A human-readable message that is shown to the user to help them fix the error. Note that this message may change from time to time, your code should check against error_type as the source of truth. */ - deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; + errorMessage?: string | null; /** - * Required. The update mask applies to the resource. See google.protobuf.FieldMask. + * The error type of this record. */ - updateMask?: string | null; - } - /** - * Response message for EndpointService.MutateDeployedModel. - */ - export interface Schema$GoogleCloudAiplatformV1beta1MutateDeployedModelResponse { + errorType?: string | null; /** - * The DeployedModel that's being mutated. + * The original content of this record. */ - deployedModel?: Schema$GoogleCloudAiplatformV1beta1DeployedModel; + rawRecord?: string | null; + /** + * Cloud Storage URI pointing to the original file in user's bucket. + */ + sourceGcsUri?: string | null; } /** - * Represents a Neural Architecture Search (NAS) job. + * A neighbor of the query vector. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1NasJob { + export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborsNeighbor { /** - * Output only. Time when the NasJob was created. + * The distance between the neighbor and the query vector. */ - createTime?: string | null; + distance?: number | null; /** - * Required. The display name of the NasJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The id of the similar entity. */ - displayName?: string | null; + entityId?: string | null; /** - * Optional. Enable a separation of Custom model training and restricted image training for tenant project. + * The attributes of the neighbor, e.g. filters, crowding and metadata Note that full entities are returned only when "return_full_entity" is set to true. Otherwise, only the "entity_id" and "distance" fields are populated. */ - enableRestrictedImageTraining?: boolean | null; + entityKeyValues?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponse; + } + /** + * Neighbors for example-based explanations. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Neighbor { /** - * Customer-managed encryption key options for a NasJob. If this is set, then all resources created by the NasJob will be encrypted with the provided encryption key. + * Output only. The neighbor distance. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + neighborDistance?: number | null; /** - * Output only. Time when the NasJob entered any of the following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. + * Output only. The neighbor id. */ - endTime?: string | null; + neighborId?: string | null; + } + /** + * Network spec. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NetworkSpec { /** - * Output only. Only populated when job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. + * Whether to enable public internet access. Default false. 
*/ - error?: Schema$GoogleRpcStatus; + enableInternetAccess?: boolean | null; /** - * The labels with user-defined metadata to organize NasJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) */ - labels?: {[key: string]: string} | null; + network?: string | null; /** - * Output only. Resource name of the NasJob. + * The name of the subnet that this instance is in. Format: `projects/{project_id_or_number\}/regions/{region\}/subnetworks/{subnetwork_id\}` */ - name?: string | null; + subnetwork?: string | null; + } + /** + * Represents a mount configuration for Network File System (NFS) to mount. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NfsMount { /** - * Output only. Output of the NasJob. + * Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ */ - nasJobOutput?: Schema$GoogleCloudAiplatformV1beta1NasJobOutput; + mountPoint?: string | null; /** - * Required. The specification of a NasJob. + * Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` */ - nasJobSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpec; + path?: string | null; /** - * Output only. Time when the NasJob for the first time entered the `JOB_STATE_RUNNING` state. + * Required. IP address of the NFS server. */ - startTime?: string | null; + server?: string | null; + } + /** + * The euc configuration of NotebookRuntimeTemplate. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NotebookEucConfig { /** - * Output only. The detailed state of the job. + * Output only. 
Whether ActAs check is bypassed for service account attached to the VM. If false, we need ActAs check for the default Compute Engine Service account. When a Runtime is created, a VM is allocated using Default Compute Engine Service Account. Any user requesting to use this Runtime requires Service Account User (ActAs) permission over this SA. If true, Runtime owner is using EUC and does not require the above permission as VM no longer use default Compute Engine SA, but a P4SA. */ - state?: string | null; + bypassActasCheck?: boolean | null; /** - * Output only. Time when the NasJob was most recently updated. + * Input only. Whether EUC is disabled in this NotebookRuntimeTemplate. In proto3, the default value of a boolean is false. In this way, by default EUC will be enabled for NotebookRuntimeTemplate. */ - updateTime?: string | null; + eucDisabled?: boolean | null; } /** - * Represents a uCAIP NasJob output. + * NotebookExecutionJob represents an instance of a notebook execution. */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobOutput { + export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJob { /** - * Output only. The output of this multi-trial Neural Architecture Search (NAS) job. + * Output only. Timestamp when this NotebookExecutionJob was created. */ - multiTrialJobOutput?: Schema$GoogleCloudAiplatformV1beta1NasJobOutputMultiTrialJobOutput; - } - /** - * The output of a multi-trial Neural Architecture Search (NAS) jobs. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobOutputMultiTrialJobOutput { + createTime?: string | null; /** - * Output only. List of NasTrials that were started as part of search stage. + * The custom compute configuration for an execution job. */ - searchTrials?: Schema$GoogleCloudAiplatformV1beta1NasTrial[]; + customEnvironmentSpec?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec; /** - * Output only. List of NasTrials that were started as part of train stage. 
+ * The Dataform Repository pointing to a single file notebook repository. */ - trainTrials?: Schema$GoogleCloudAiplatformV1beta1NasTrial[]; - } - /** - * Represents the spec of a NasJob. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpec { + dataformRepositorySource?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource; /** - * The spec of multi-trial algorithms. + * The contents of an input notebook file. */ - multiTrialAlgorithmSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpec; + directNotebookSource?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource; /** - * The ID of the existing NasJob in the same Project and Location which will be used to resume search. search_space_spec and nas_algorithm_spec are obtained from previous NasJob hence should not provide them again for this NasJob. + * The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - resumeNasJobId?: string | null; + displayName?: string | null; /** - * It defines the search space for Neural Architecture Search (NAS). + * Max running time of the execution job in seconds (default 86400s / 24 hrs). */ - searchSpaceSpec?: string | null; - } - /** - * The spec of multi-trial Neural Architecture Search (NAS). - */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpec { + executionTimeout?: string | null; /** - * Metric specs for the NAS job. Validation for this field is done at `multi_trial_algorithm_spec` field. + * The user email to run the execution as. Only supported by Colab runtimes. */ - metric?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecMetricSpec; + executionUser?: string | null; /** - * The multi-trial Neural Architecture Search (NAS) algorithm type. Defaults to `REINFORCEMENT_LEARNING`. + * The Cloud Storage url pointing to the ipynb file. 
Format: `gs://bucket/notebook_file.ipynb` */ - multiTrialAlgorithm?: string | null; + gcsNotebookSource?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobGcsNotebookSource; /** - * Required. Spec for search trials. + * The Cloud Storage location to upload the result to. Format: `gs://bucket-name` */ - searchTrialSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec; + gcsOutputUri?: string | null; /** - * Spec for train trials. Top N [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for every M [TrainTrialSpec.frequency] trials searched. + * Output only. The state of the NotebookExecutionJob. */ - trainTrialSpec?: Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec; - } - /** - * Represents a metric to optimize. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecMetricSpec { + jobState?: string | null; /** - * Required. The optimization goal of the metric. + * The labels with user-defined metadata to organize NotebookExecutionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - goal?: string | null; + labels?: {[key: string]: string} | null; /** - * Required. The ID of the metric. Must not contain whitespaces. + * Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id\}/locations/{location\}/notebookExecutionJobs/{job_id\}` */ - metricId?: string | null; - } - /** - * Represent spec for search trials. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { + name?: string | null; /** - * The number of failed trials that need to be seen before failing the NasJob. If set to 0, Vertex AI decides how many trials must fail before the whole job fails. + * The NotebookRuntimeTemplate to source compute configuration from. */ - maxFailedTrialCount?: number | null; + notebookRuntimeTemplateResourceName?: string | null; /** - * Required. The maximum number of trials to run in parallel. + * Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id\}/locations/{location\}/schedules/{schedule_id\}` */ - maxParallelTrialCount?: number | null; + scheduleResourceName?: string | null; /** - * Required. The maximum number of Neural Architecture Search (NAS) trials to run. + * The service account to run the execution as. */ - maxTrialCount?: number | null; + serviceAccount?: string | null; /** - * Required. The spec of a search trial job. The same spec applies to all search trials. + * Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. */ - searchTrialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; + status?: Schema$GoogleRpcStatus; + /** + * Output only. Timestamp when this NotebookExecutionJob was most recently updated. + */ + updateTime?: string | null; } /** - * Represent spec for train trials. + * Compute configuration to use for an execution job. */ - export interface Schema$GoogleCloudAiplatformV1beta1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { + export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec { /** - * Required. Frequency of search trials to start train stage. Top N [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for every M [TrainTrialSpec.frequency] trials searched. 
+ * The specification of a single machine for the execution job. */ - frequency?: number | null; + machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; /** - * Required. The maximum number of trials to run in parallel. + * The network configuration to use for the execution job. */ - maxParallelTrialCount?: number | null; + networkSpec?: Schema$GoogleCloudAiplatformV1beta1NetworkSpec; /** - * Required. The spec of a train trial job. The same spec applies to all train trials. + * The specification of a persistent disk to attach for the execution job. */ - trainTrialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; + persistentDiskSpec?: Schema$GoogleCloudAiplatformV1beta1PersistentDiskSpec; } /** - * Represents a uCAIP NasJob trial. + * The Dataform Repository containing the input notebook. */ - export interface Schema$GoogleCloudAiplatformV1beta1NasTrial { + export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource { /** - * Output only. Time when the NasTrial's status changed to `SUCCEEDED` or `INFEASIBLE`. + * The commit SHA to read repository with. If unset, the file will be read at HEAD. */ - endTime?: string | null; + commitSha?: string | null; /** - * Output only. The final measurement containing the objective value. + * The resource name of the Dataform Repository. Format: `projects/{project_id\}/locations/{location\}/repositories/{repository_id\}` */ - finalMeasurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; + dataformRepositoryResourceName?: string | null; + } + /** + * The content of the input notebook in ipynb format. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource { /** - * Output only. The identifier of the NasTrial assigned by the service. + * The base64-encoded contents of the input notebook file. */ - id?: string | null; + content?: string | null; + } + /** + * The Cloud Storage uri for the input notebook. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobGcsNotebookSource { /** - * Output only. Time when the NasTrial was started. + * The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. */ - startTime?: string | null; + generation?: string | null; /** - * Output only. The detailed state of the NasTrial. + * The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` */ - state?: string | null; + uri?: string | null; } /** - * Represents a NasTrial details along with its parameters. If there is a corresponding train NasTrial, the train NasTrial is also returned. + * The idle shutdown configuration of NotebookRuntimeTemplate, which contains the idle_timeout as required field. */ - export interface Schema$GoogleCloudAiplatformV1beta1NasTrialDetail { + export interface Schema$GoogleCloudAiplatformV1beta1NotebookIdleShutdownConfig { /** - * Output only. Resource name of the NasTrialDetail. + * Whether Idle Shutdown is disabled in this NotebookRuntimeTemplate. */ - name?: string | null; + idleShutdownDisabled?: boolean | null; /** - * The parameters for the NasJob NasTrial. + * Required. Duration is accurate to the second. In Notebook, Idle Timeout is accurate to minute so the range of idle_timeout (second) is: 10 * 60 ~ 1440 * 60. */ - parameters?: string | null; + idleTimeout?: string | null; + } + /** + * Notebook Reservation Affinity for consuming Zonal reservation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NotebookReservationAffinity { /** - * The requested search NasTrial. + * Required. Specifies the type of reservation from which this instance can consume resources: RESERVATION_ANY (default), RESERVATION_SPECIFIC, or RESERVATION_NONE. See Consuming reserved instances for examples. 
*/ - searchTrial?: Schema$GoogleCloudAiplatformV1beta1NasTrial; + consumeReservationType?: string | null; /** - * The train NasTrial corresponding to search_trial. Only populated if search_trial is used for training. + * Optional. Corresponds to the label key of a reservation resource. To target a RESERVATION_SPECIFIC by name, use compute.googleapis.com/reservation-name as the key and specify the name of your reservation as its value. */ - trainTrial?: Schema$GoogleCloudAiplatformV1beta1NasTrial; + key?: string | null; + /** + * Optional. Corresponds to the label values of a reservation resource. This must be the full path name of Reservation. + */ + values?: string[] | null; } /** - * A query to find a number of similar entities. + * A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary basis with lifetime limited to 24 hours. */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQuery { + export interface Schema$GoogleCloudAiplatformV1beta1NotebookRuntime { /** - * Optional. The embedding vector that be used for similar search. + * Output only. Timestamp when this NotebookRuntime was created. */ - embedding?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryEmbedding; + createTime?: string | null; /** - * Optional. The entity id whose similar entities should be searched for. If embedding is set, search will use embedding instead of entity_id. + * The description of the NotebookRuntime. */ - entityId?: string | null; + description?: string | null; /** - * Optional. The number of similar entities to be retrieved from feature view for each query. + * Required. The display name of the NotebookRuntime. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - neighborCount?: number | null; + displayName?: string | null; /** - * Optional. Parameters that can be set to tune query on the fly. + * Output only. Timestamp when this NotebookRuntime will be expired: 1. 
System Predefined NotebookRuntime: 24 hours after creation. After expiration, system predifined runtime will be deleted. 2. User created NotebookRuntime: 6 months after last upgrade. After expiration, user created runtime will be stopped and allowed for upgrade. */ - parameters?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryParameters; + expirationTime?: string | null; /** - * Optional. Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than sper_crowding_attribute_neighbor_count of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. + * Output only. The health state of the NotebookRuntime. */ - perCrowdingAttributeNeighborCount?: number | null; + healthState?: string | null; /** - * Optional. The list of string filters. + * Output only. Whether NotebookRuntime is upgradable. */ - stringFilters?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryStringFilter[]; - } - /** - * The embedding vector. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryEmbedding { + isUpgradable?: boolean | null; /** - * Optional. Individual value in the embedding. + * The labels with user-defined metadata to organize your NotebookRuntime. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one NotebookRuntime (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for NotebookRuntime: * "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": output only, its value is the Compute Engine instance id. 
* "aiplatform.googleapis.com/colab_enterprise_entry_service": its value is either "bigquery" or "vertex"; if absent, it should be "vertex". This is to describe the entry service, either BigQuery or Vertex. */ - value?: number[] | null; - } - /** - * Parameters that can be overrided in each query to tune query latency and recall. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryParameters { + labels?: {[key: string]: string} | null; /** - * Optional. The number of neighbors to find via approximate search before exact reordering is performed; if set, this value must be \> neighbor_count. + * Output only. The resource name of the NotebookRuntime. */ - approximateNeighborCandidates?: number | null; + name?: string | null; /** - * Optional. The fraction of the number of leaves to search, set at query time allows user to tune search performance. This value increase result in both search accuracy and latency increase. The value should be between 0.0 and 1.0. + * Optional. The Compute Engine tags to add to runtime (see [Tagging instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). */ - leafNodesSearchFraction?: number | null; - } - /** - * String filter is used to search a subset of the entities by using boolean rules on string columns. For example: if a query specifies string filter with 'name = color, allow_tokens = {red, blue\}, deny_tokens = {purple\}',' then that query will match entities that are red or blue, but if those points are also purple, then they will be excluded even if they are red/blue. Only string filter is supported for now, numeric filter will be supported in the near future. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborQueryStringFilter { + networkTags?: string[] | null; /** - * Optional. The allowed tokens. + * Output only. The pointer to NotebookRuntimeTemplate this NotebookRuntime is created from. 
*/ - allowTokens?: string[] | null; + notebookRuntimeTemplateRef?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplateRef; /** - * Optional. The denied tokens. + * Output only. The type of the notebook runtime. */ - denyTokens?: string[] | null; + notebookRuntimeType?: string | null; /** - * Required. Column names in BigQuery that used as filters. + * Output only. The proxy endpoint used to access the NotebookRuntime. */ - name?: string | null; - } - /** - * Nearest neighbors for one query. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighbors { + proxyUri?: string | null; /** - * All its neighbors. + * Output only. Reservation Affinity of the notebook runtime. */ - neighbors?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborsNeighbor[]; - } - /** - * Runtime operation metadata with regard to Matching Engine Index. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata { + reservationAffinity?: Schema$GoogleCloudAiplatformV1beta1NotebookReservationAffinity; /** - * The validation stats of the content (per file) to be inserted or updated on the Matching Engine Index resource. Populated if contentsDeltaUri is provided as part of Index.metadata. Please note that, currently for those files that are broken or has unsupported file format, we will not have the stats for those files. + * Output only. The runtime (instance) state of the NotebookRuntime. */ - contentValidationStats?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataContentValidationStats[]; + runtimeState?: string | null; /** - * The ingested data size in bytes. + * Required. The user email of the NotebookRuntime. */ - dataBytesCount?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataContentValidationStats { + runtimeUser?: string | null; /** - * Number of records in this file we skipped due to validate errors. + * Output only. 
Reserved for future use. */ - invalidRecordCount?: string | null; + satisfiesPzi?: boolean | null; /** - * Number of sparse records in this file we skipped due to validate errors. + * Output only. Reserved for future use. */ - invalidSparseRecordCount?: string | null; + satisfiesPzs?: boolean | null; /** - * The detail information of the partial failures encountered for those invalid records that couldn't be parsed. Up to 50 partial errors will be reported. + * Output only. The service account that the NotebookRuntime workload runs as. */ - partialErrors?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataRecordError[]; + serviceAccount?: string | null; /** - * Cloud Storage URI pointing to the original file in user's bucket. + * Output only. Timestamp when this NotebookRuntime was most recently updated. */ - sourceGcsUri?: string | null; + updateTime?: string | null; /** - * Number of records in this file that were successfully processed. + * Output only. The VM os image version of NotebookRuntime. */ - validRecordCount?: string | null; + version?: string | null; + } + /** + * A template that specifies runtime configurations such as machine type, runtime version, network configurations, etc. Multiple runtimes can be created from a runtime template. + */ + export interface Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplate { /** - * Number of sparse records in this file that were successfully processed. + * Output only. Timestamp when this NotebookRuntimeTemplate was created. */ - validSparseRecordCount?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadataRecordError { + createTime?: string | null; /** - * Empty if the embedding id is failed to parse. + * Optional. The specification of persistent disk attached to the runtime as data disk storage. 
*/ - embeddingId?: string | null; + dataPersistentDiskSpec?: Schema$GoogleCloudAiplatformV1beta1PersistentDiskSpec; /** - * A human-readable message that is shown to the user to help them fix the error. Note that this message may change from time to time, your code should check against error_type as the source of truth. + * The description of the NotebookRuntimeTemplate. */ - errorMessage?: string | null; + description?: string | null; /** - * The error type of this record. + * Required. The display name of the NotebookRuntimeTemplate. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - errorType?: string | null; + displayName?: string | null; /** - * The original content of this record. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - rawRecord?: string | null; + etag?: string | null; /** - * Cloud Storage URI pointing to the original file in user's bucket. + * EUC configuration of the NotebookRuntimeTemplate. */ - sourceGcsUri?: string | null; - } - /** - * A neighbor of the query vector. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NearestNeighborsNeighbor { + eucConfig?: Schema$GoogleCloudAiplatformV1beta1NotebookEucConfig; /** - * The distance between the neighbor and the query vector. + * The idle shutdown configuration of NotebookRuntimeTemplate. This config will only be set when idle shutdown is enabled. */ - distance?: number | null; + idleShutdownConfig?: Schema$GoogleCloudAiplatformV1beta1NotebookIdleShutdownConfig; /** - * The id of the similar entity. + * Output only. The default template to use if not specified. */ - entityId?: string | null; + isDefault?: boolean | null; /** - * The attributes of the neighbor, e.g. filters, crowding and metadata Note that full entities are returned only when "return_full_entity" is set to true. Otherwise, only the "entity_id" and "distance" fields are populated. 
+ * The labels with user-defined metadata to organize the NotebookRuntimeTemplates. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - entityKeyValues?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponse; - } - /** - * Neighbors for example-based explanations. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Neighbor { + labels?: {[key: string]: string} | null; /** - * Output only. The neighbor distance. + * Optional. Immutable. The specification of a single machine for the template. */ - neighborDistance?: number | null; + machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; /** - * Output only. The neighbor id. + * The resource name of the NotebookRuntimeTemplate. */ - neighborId?: string | null; - } - /** - * Network spec. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NetworkSpec { + name?: string | null; /** - * Whether to enable public internet access. Default false. + * Optional. Network spec. */ - enableInternetAccess?: boolean | null; + networkSpec?: Schema$GoogleCloudAiplatformV1beta1NetworkSpec; /** - * The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + * Optional. The Compute Engine tags to add to runtime (see [Tagging instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). */ - network?: string | null; + networkTags?: string[] | null; /** - * The name of the subnet that this instance is in. Format: `projects/{project_id_or_number\}/regions/{region\}/subnetworks/{subnetwork_id\}` + * Optional. Immutable. The type of the notebook runtime template. */ - subnetwork?: string | null; - } - /** - * Represents a mount configuration for Network File System (NFS) to mount. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1NfsMount { + notebookRuntimeType?: string | null; /** - * Required. Destination mount path. The NFS will be mounted for the user under /mnt/nfs/ + * Optional. Reservation Affinity of the notebook runtime template. */ - mountPoint?: string | null; + reservationAffinity?: Schema$GoogleCloudAiplatformV1beta1NotebookReservationAffinity; /** - * Required. Source path exported from NFS server. Has to start with '/', and combined with the ip address, it indicates the source mount path in the form of `server:path` + * The service account that the runtime workload runs as. You can use any service account within the same project, but you must have the service account user permission to use the instance. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. */ - path?: string | null; + serviceAccount?: string | null; /** - * Required. IP address of the NFS server. + * Optional. Immutable. Runtime Shielded VM spec. */ - server?: string | null; + shieldedVmConfig?: Schema$GoogleCloudAiplatformV1beta1ShieldedVmConfig; + /** + * Output only. Timestamp when this NotebookRuntimeTemplate was most recently updated. + */ + updateTime?: string | null; } /** - * The euc configuration of NotebookRuntimeTemplate. + * Points to a NotebookRuntimeTemplateRef. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookEucConfig { - /** - * Output only. Whether ActAs check is bypassed for service account attached to the VM. If false, we need ActAs check for the default Compute Engine Service account. When a Runtime is created, a VM is allocated using Default Compute Engine Service Account. Any user requesting to use this Runtime requires Service Account User (ActAs) permission over this SA. If true, Runtime owner is using EUC and does not require the above permission as VM no longer use default Compute Engine SA, but a P4SA. 
- */ - bypassActasCheck?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplateRef { /** - * Input only. Whether EUC is disabled in this NotebookRuntimeTemplate. In proto3, the default value of a boolean is false. In this way, by default EUC will be enabled for NotebookRuntimeTemplate. + * Immutable. A resource name of the NotebookRuntimeTemplate. */ - eucDisabled?: boolean | null; + notebookRuntimeTemplate?: string | null; } /** - * NotebookExecutionJob represents an instance of a notebook execution. + * Input for pairwise question answering quality metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJob { + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput { /** - * Output only. Timestamp when this NotebookExecutionJob was created. + * Required. Pairwise question answering quality instance. */ - createTime?: string | null; + instance?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance; /** - * The custom compute configuration for an execution job. + * Required. Spec for pairwise question answering quality score metric. */ - customEnvironmentSpec?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec; + } + /** + * Spec for pairwise question answering quality instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance { /** - * The Dataform Repository pointing to a single file notebook repository. + * Required. Output of the baseline model. */ - dataformRepositorySource?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource; + baselinePrediction?: string | null; /** - * The contents of an input notebook file. + * Required. Text to answer the question. 
*/ - directNotebookSource?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource; + context?: string | null; /** - * The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Required. Question Answering prompt for LLM. */ - displayName?: string | null; + instruction?: string | null; /** - * Max running time of the execution job in seconds (default 86400s / 24 hrs). + * Required. Output of the candidate model. */ - executionTimeout?: string | null; + prediction?: string | null; /** - * The user email to run the execution as. Only supported by Colab runtimes. + * Optional. Ground truth used to compare against the prediction. */ - executionUser?: string | null; + reference?: string | null; + } + /** + * Spec for pairwise question answering quality result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult { /** - * The Cloud Storage url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + * Output only. Confidence for question answering quality score. */ - gcsNotebookSource?: Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobGcsNotebookSource; + confidence?: number | null; /** - * The Cloud Storage location to upload the result to. Format: `gs://bucket-name` + * Output only. Explanation for question answering quality score. */ - gcsOutputUri?: string | null; + explanation?: string | null; /** - * Output only. The state of the NotebookExecutionJob. + * Output only. Pairwise question answering prediction choice. */ - jobState?: string | null; + pairwiseChoice?: string | null; + } + /** + * Spec for pairwise question answering quality score metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec { /** - * The labels with user-defined metadata to organize NotebookExecutionJobs. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Optional. Whether to use instance.reference to compute question answering quality. */ - labels?: {[key: string]: string} | null; + useReference?: boolean | null; /** - * Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id\}/locations/{location\}/notebookExecutionJobs/{job_id\}` + * Optional. Which version to use for evaluation. */ - name?: string | null; + version?: number | null; + } + /** + * Input for pairwise summarization quality metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput { /** - * The NotebookRuntimeTemplate to source compute configuration from. + * Required. Pairwise summarization quality instance. */ - notebookRuntimeTemplateResourceName?: string | null; + instance?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance; /** - * Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id\}/locations/{location\}/schedules/{schedule_id\}` + * Required. Spec for pairwise summarization quality score metric. */ - scheduleResourceName?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec; + } + /** + * Spec for pairwise summarization quality instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance { /** - * The service account to run the execution as. + * Required. Output of the baseline model. */ - serviceAccount?: string | null; + baselinePrediction?: string | null; /** - * Output only. Populated when the NotebookExecutionJob is completed. 
When there is an error during notebook execution, the error details are populated. + * Required. Text to be summarized. */ - status?: Schema$GoogleRpcStatus; + context?: string | null; /** - * Output only. Timestamp when this NotebookExecutionJob was most recently updated. + * Required. Summarization prompt for LLM. */ - updateTime?: string | null; + instruction?: string | null; + /** + * Required. Output of the candidate model. + */ + prediction?: string | null; + /** + * Optional. Ground truth used to compare against the prediction. + */ + reference?: string | null; } /** - * Compute configuration to use for an execution job. + * Spec for pairwise summarization quality result. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec { + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult { /** - * The specification of a single machine for the execution job. + * Output only. Confidence for summarization quality score. */ - machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + confidence?: number | null; /** - * The network configuration to use for the execution job. + * Output only. Explanation for summarization quality score. */ - networkSpec?: Schema$GoogleCloudAiplatformV1beta1NetworkSpec; + explanation?: string | null; /** - * The specification of a persistent disk to attach for the execution job. + * Output only. Pairwise summarization prediction choice. */ - persistentDiskSpec?: Schema$GoogleCloudAiplatformV1beta1PersistentDiskSpec; + pairwiseChoice?: string | null; } /** - * The Dataform Repository containing the input notebook. + * Spec for pairwise summarization quality score metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource { + export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec { /** - * The commit SHA to read repository with. If unset, the file will be read at HEAD. 
+ * Optional. Whether to use instance.reference to compute pairwise summarization quality. */ - commitSha?: string | null; + useReference?: boolean | null; /** - * The resource name of the Dataform Repository. Format: `projects/{project_id\}/locations/{location\}/repositories/{repository_id\}` + * Optional. Which version to use for evaluation. */ - dataformRepositoryResourceName?: string | null; + version?: number | null; } /** - * The content of the input notebook in ipynb format. + * A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource { + export interface Schema$GoogleCloudAiplatformV1beta1Part { /** - * The base64-encoded contents of the input notebook file. + * Optional. URI based data. */ - content?: string | null; - } - /** - * The Cloud Storage uri for the input notebook. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookExecutionJobGcsNotebookSource { + fileData?: Schema$GoogleCloudAiplatformV1beta1FileData; /** - * The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + * Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. */ - generation?: string | null; + functionCall?: Schema$GoogleCloudAiplatformV1beta1FunctionCall; /** - * The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + * Optional. 
The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. */ - uri?: string | null; - } - /** - * The idle shutdown configuration of NotebookRuntimeTemplate, which contains the idle_timeout as required field. - */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookIdleShutdownConfig { + functionResponse?: Schema$GoogleCloudAiplatformV1beta1FunctionResponse; /** - * Whether Idle Shutdown is disabled in this NotebookRuntimeTemplate. + * Optional. Inlined bytes data. */ - idleShutdownDisabled?: boolean | null; + inlineData?: Schema$GoogleCloudAiplatformV1beta1Blob; /** - * Required. Duration is accurate to the second. In Notebook, Idle Timeout is accurate to minute so the range of idle_timeout (second) is: 10 * 60 ~ 1440 * 60. + * Optional. Text part (can be code). */ - idleTimeout?: string | null; + text?: string | null; + /** + * Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + */ + videoMetadata?: Schema$GoogleCloudAiplatformV1beta1VideoMetadata; } /** - * Notebook Reservation Affinity for consuming Zonal reservation. + * Request message for JobService.PauseModelDeploymentMonitoringJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookReservationAffinity { - /** - * Required. Specifies the type of reservation from which this instance can consume resources: RESERVATION_ANY (default), RESERVATION_SPECIFIC, or RESERVATION_NONE. See Consuming reserved instances for examples. - */ - consumeReservationType?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1PauseModelDeploymentMonitoringJobRequest {} + /** + * Request message for ScheduleService.PauseSchedule. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1PauseScheduleRequest {} + /** + * Represents the spec of persistent disk options. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PersistentDiskSpec { /** - * Optional. Corresponds to the label key of a reservation resource. To target a RESERVATION_SPECIFIC by name, use compute.googleapis.com/reservation-name as the key and specify the name of your reservation as its value. + * Size in GB of the disk (default is 100GB). */ - key?: string | null; + diskSizeGb?: string | null; /** - * Optional. Corresponds to the label values of a reservation resource. This must be the full path name of Reservation. + * Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) */ - values?: string[] | null; + diskType?: string | null; } /** - * A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary basis with lifetime limited to 24 hours. + * Represents long-lasting resources that are dedicated to users to runs custom workloads. A PersistentResource can have multiple node pools and each node pool can have its own machine spec. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookRuntime { + export interface Schema$GoogleCloudAiplatformV1beta1PersistentResource { /** - * Output only. Timestamp when this NotebookRuntime was created. + * Output only. Time when the PersistentResource was created. */ createTime?: string | null; /** - * The description of the NotebookRuntime. - */ - description?: string | null; - /** - * Required. The display name of the NotebookRuntime. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Optional. The display name of the PersistentResource. 
The name can be up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string | null; /** - * Output only. Timestamp when this NotebookRuntime will be expired: 1. System Predefined NotebookRuntime: 24 hours after creation. After expiration, system predifined runtime will be deleted. 2. User created NotebookRuntime: 6 months after last upgrade. After expiration, user created runtime will be stopped and allowed for upgrade. - */ - expirationTime?: string | null; - /** - * Output only. The health state of the NotebookRuntime. + * Optional. Customer-managed encryption key spec for a PersistentResource. If set, this PersistentResource and all sub-resources of this PersistentResource will be secured by this key. */ - healthState?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Output only. Whether NotebookRuntime is upgradable. + * Output only. Only populated when persistent resource's state is `STOPPING` or `ERROR`. */ - isUpgradable?: boolean | null; + error?: Schema$GoogleRpcStatus; /** - * The labels with user-defined metadata to organize your NotebookRuntime. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one NotebookRuntime (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. Following system labels exist for NotebookRuntime: * "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": output only, its value is the Compute Engine instance id. * "aiplatform.googleapis.com/colab_enterprise_entry_service": its value is either "bigquery" or "vertex"; if absent, it should be "vertex". This is to describe the entry service, either BigQuery or Vertex. 
+ * Optional. The labels with user-defined metadata to organize PersistentResource. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ labels?: {[key: string]: string} | null; /** - * Output only. The resource name of the NotebookRuntime. + * Immutable. Resource name of a PersistentResource. */ name?: string | null; /** - * Optional. The Compute Engine tags to add to runtime (see [Tagging instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). - */ - networkTags?: string[] | null; - /** - * Output only. The pointer to NotebookRuntimeTemplate this NotebookRuntime is created from. - */ - notebookRuntimeTemplateRef?: Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplateRef; - /** - * Output only. The type of the notebook runtime. - */ - notebookRuntimeType?: string | null; - /** - * Output only. The proxy endpoint used to access the NotebookRuntime. + * Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to peered with Vertex AI to host the persistent resources. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the resources aren't peered with any network. */ - proxyUri?: string | null; + network?: string | null; /** - * Output only. Reservation Affinity of the notebook runtime. + * Optional. 
A list of names for the reserved IP ranges under the VPC network that can be used for this persistent resource. If set, we will deploy the persistent resource within the provided IP ranges. Otherwise, the persistent resource is deployed to any IP ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. */ - reservationAffinity?: Schema$GoogleCloudAiplatformV1beta1NotebookReservationAffinity; + reservedIpRanges?: string[] | null; /** - * Output only. The runtime (instance) state of the NotebookRuntime. + * Required. The spec of the pools of different resources. */ - runtimeState?: string | null; + resourcePools?: Schema$GoogleCloudAiplatformV1beta1ResourcePool[]; /** - * Required. The user email of the NotebookRuntime. + * Output only. Runtime information of the Persistent Resource. */ - runtimeUser?: string | null; + resourceRuntime?: Schema$GoogleCloudAiplatformV1beta1ResourceRuntime; /** - * Output only. Reserved for future use. + * Optional. Persistent Resource runtime spec. For example, used for Ray cluster configuration. */ - satisfiesPzi?: boolean | null; + resourceRuntimeSpec?: Schema$GoogleCloudAiplatformV1beta1ResourceRuntimeSpec; /** - * Output only. Reserved for future use. + * Output only. Time when the PersistentResource for the first time entered the `RUNNING` state. */ - satisfiesPzs?: boolean | null; + startTime?: string | null; /** - * Output only. The service account that the NotebookRuntime workload runs as. + * Output only. The detailed state of a Study. */ - serviceAccount?: string | null; + state?: string | null; /** - * Output only. Timestamp when this NotebookRuntime was most recently updated. + * Output only. Time when the PersistentResource was most recently updated. */ updateTime?: string | null; - /** - * Output only. The VM os image version of NotebookRuntime. - */ - version?: string | null; } /** - * A template that specifies runtime configurations such as machine type, runtime version, network configurations, etc. 
Multiple runtimes can be created from a runtime template. + * An instance of a machine learning PipelineJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplate { + export interface Schema$GoogleCloudAiplatformV1beta1PipelineJob { /** - * Output only. Timestamp when this NotebookRuntimeTemplate was created. + * Output only. Pipeline creation time. */ createTime?: string | null; /** - * Optional. The specification of persistent disk attached to the runtime as data disk storage. + * The display name of the Pipeline. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - dataPersistentDiskSpec?: Schema$GoogleCloudAiplatformV1beta1PersistentDiskSpec; + displayName?: string | null; /** - * The description of the NotebookRuntimeTemplate. + * Customer-managed encryption key spec for a pipelineJob. If set, this PipelineJob and all of its sub-resources will be secured by this key. */ - description?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Required. The display name of the NotebookRuntimeTemplate. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Output only. Pipeline end time. */ - displayName?: string | null; + endTime?: string | null; /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Output only. The error that occurred during pipeline execution. Only populated when the pipeline's state is FAILED or CANCELLED. */ - etag?: string | null; + error?: Schema$GoogleRpcStatus; /** - * EUC configuration of the NotebookRuntimeTemplate. + * Output only. The details of pipeline run. Not available in the list view. */ - eucConfig?: Schema$GoogleCloudAiplatformV1beta1NotebookEucConfig; + jobDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineJobDetail; /** - * The idle shutdown configuration of NotebookRuntimeTemplate. 
This config will only be set when idle shutdown is enabled. + * The labels with user-defined metadata to organize PipelineJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. Note there is some reserved label key for Vertex AI Pipelines. - `vertex-ai-pipelines-run-billing-id`, user set value will get overrided. */ - idleShutdownConfig?: Schema$GoogleCloudAiplatformV1beta1NotebookIdleShutdownConfig; + labels?: {[key: string]: string} | null; /** - * Output only. The default template to use if not specified. + * Output only. The resource name of the PipelineJob. */ - isDefault?: boolean | null; + name?: string | null; /** - * The labels with user-defined metadata to organize the NotebookRuntimeTemplates. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. */ - labels?: {[key: string]: string} | null; + network?: string | null; /** - * Optional. 
Immutable. The specification of a single machine for the template. + * The spec of the pipeline. */ - machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + pipelineSpec?: {[key: string]: any} | null; /** - * The resource name of the NotebookRuntimeTemplate. + * Optional. Whether to do component level validations before job creation. */ - name?: string | null; + preflightValidations?: boolean | null; /** - * Optional. Network spec. + * A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. */ - networkSpec?: Schema$GoogleCloudAiplatformV1beta1NetworkSpec; + reservedIpRanges?: string[] | null; /** - * Optional. The Compute Engine tags to add to runtime (see [Tagging instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). + * Runtime config of the pipeline. */ - networkTags?: string[] | null; + runtimeConfig?: Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfig; /** - * Optional. Immutable. The type of the notebook runtime template. + * Output only. The schedule resource name. Only returned if the Pipeline is created by Schedule API. */ - notebookRuntimeType?: string | null; + scheduleName?: string | null; /** - * Optional. Reservation Affinity of the notebook runtime template. + * The service account that the pipeline workload runs as. If not specified, the Compute Engine default service account in the project will be used. See https://cloud.google.com/compute/docs/access/service-accounts#default_service_account Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account. 
*/ - reservationAffinity?: Schema$GoogleCloudAiplatformV1beta1NotebookReservationAffinity; + serviceAccount?: string | null; /** - * The service account that the runtime workload runs as. You can use any service account within the same project, but you must have the service account user permission to use the instance. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. + * Output only. Pipeline start time. */ - serviceAccount?: string | null; + startTime?: string | null; /** - * Optional. Immutable. Runtime Shielded VM spec. + * Output only. The detailed state of the job. */ - shieldedVmConfig?: Schema$GoogleCloudAiplatformV1beta1ShieldedVmConfig; + state?: string | null; /** - * Output only. Timestamp when this NotebookRuntimeTemplate was most recently updated. + * Output only. Pipeline template metadata. Will fill up fields if PipelineJob.template_uri is from supported template registry. + */ + templateMetadata?: Schema$GoogleCloudAiplatformV1beta1PipelineTemplateMetadata; + /** + * A template uri from where the PipelineJob.pipeline_spec, if empty, will be downloaded. Currently, only uri from Vertex Template Registry & Gallery is supported. Reference to https://cloud.google.com/vertex-ai/docs/pipelines/create-pipeline-template. + */ + templateUri?: string | null; + /** + * Output only. Timestamp when this PipelineJob was most recently updated. */ updateTime?: string | null; } /** - * Points to a NotebookRuntimeTemplateRef. + * The runtime detail of PipelineJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1NotebookRuntimeTemplateRef { + export interface Schema$GoogleCloudAiplatformV1beta1PipelineJobDetail { /** - * Immutable. A resource name of the NotebookRuntimeTemplate. + * Output only. The context of the pipeline. */ - notebookRuntimeTemplate?: string | null; - } - /** - * Input for pairwise question answering quality metric. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput { + pipelineContext?: Schema$GoogleCloudAiplatformV1beta1Context; /** - * Required. Pairwise question answering quality instance. + * Output only. The context of the current pipeline run. */ - instance?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance; + pipelineRunContext?: Schema$GoogleCloudAiplatformV1beta1Context; /** - * Required. Spec for pairwise question answering quality score metric. + * Output only. The runtime details of the tasks under the pipeline. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec; + taskDetails?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetail[]; } /** - * Spec for pairwise question answering quality instance. + * The runtime config of a PipelineJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance { + export interface Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfig { /** - * Required. Output of the baseline model. + * Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. */ - baselinePrediction?: string | null; + failurePolicy?: string | null; /** - * Required. Text to answer the question. + * Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. The artifact paths are generated with a sub-path pattern `{job_id\}/{task_id\}/{output_key\}` under the specified output directory. 
The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. */ - context?: string | null; + gcsOutputDirectory?: string | null; /** - * Required. Question Answering prompt for LLM. + * The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. */ - instruction?: string | null; + inputArtifacts?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigInputArtifact; + } | null; /** - * Required. Output of the candidate model. + * Deprecated. Use RuntimeConfig.parameter_values instead. The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as pipelines built using Kubeflow Pipelines SDK 1.8 or lower. */ - prediction?: string | null; + parameters?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1Value; + } | null; /** - * Optional. Ground truth used to compare against the prediction. + * The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL. */ - reference?: string | null; + parameterValues?: {[key: string]: any} | null; } /** - * Spec for pairwise question answering quality result. + * The type of an input artifact. */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult { - /** - * Output only. Confidence for question answering quality score. - */ - confidence?: number | null; - /** - * Output only. Explanation for question answering quality score. 
- */ - explanation?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigInputArtifact { /** - * Output only. Pairwise question answering prediction choice. + * Artifact resource id from MLMD. Which is the last portion of an artifact resource name: `projects/{project\}/locations/{location\}/metadataStores/default/artifacts/{artifact_id\}`. The artifact must stay within the same project, location and default metadatastore as the pipeline. */ - pairwiseChoice?: string | null; + artifactId?: string | null; } /** - * Spec for pairwise question answering quality score metric. + * The runtime detail of a task execution. */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec { + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetail { /** - * Optional. Whether to use instance.reference to compute question answering quality. + * Output only. Task create time. */ - useReference?: boolean | null; + createTime?: string | null; /** - * Optional. Which version to use for evaluation. + * Output only. Task end time. */ - version?: number | null; - } - /** - * Input for pairwise summarization quality metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput { + endTime?: string | null; /** - * Required. Pairwise summarization quality instance. + * Output only. The error that occurred during task execution. Only populated when the task's state is FAILED or CANCELLED. */ - instance?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance; + error?: Schema$GoogleRpcStatus; /** - * Required. Spec for pairwise summarization quality score metric. + * Output only. The execution metadata of the task. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec; - } - /** - * Spec for pairwise summarization quality instance. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance { + execution?: Schema$GoogleCloudAiplatformV1beta1Execution; /** - * Required. Output of the baseline model. + * Output only. The detailed execution info. */ - baselinePrediction?: string | null; + executorDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetail; /** - * Required. Text to be summarized. + * Output only. The runtime input artifacts of the task. */ - context?: string | null; + inputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailArtifactList; + } | null; /** - * Required. Summarization prompt for LLM. + * Output only. The runtime output artifacts of the task. */ - instruction?: string | null; + outputs?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailArtifactList; + } | null; /** - * Required. Output of the candidate model. + * Output only. The id of the parent task if the task is within a component scope. Empty if the task is at the root level. */ - prediction?: string | null; + parentTaskId?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * Output only. A list of task status. This field keeps a record of task status evolving over time. */ - reference?: string | null; - } - /** - * Spec for pairwise summarization quality result. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult { + pipelineTaskStatus?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailPipelineTaskStatus[]; /** - * Output only. Confidence for summarization quality score. + * Output only. Task start time. */ - confidence?: number | null; + startTime?: string | null; /** - * Output only. Explanation for summarization quality score. + * Output only. State of the task. */ - explanation?: string | null; + state?: string | null; /** - * Output only. Pairwise summarization prediction choice. + * Output only. 
The system generated ID of the task. */ - pairwiseChoice?: string | null; + taskId?: string | null; + /** + * Output only. The user specified name of the task that is defined in pipeline_spec. + */ + taskName?: string | null; } /** - * Spec for pairwise summarization quality score metric. + * A list of artifact metadata. */ - export interface Schema$GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec { - /** - * Optional. Whether to use instance.reference to compute pairwise summarization quality. - */ - useReference?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailArtifactList { /** - * Optional. Which version to use for evaluation. + * Output only. A list of artifact metadata. */ - version?: number | null; + artifacts?: Schema$GoogleCloudAiplatformV1beta1Artifact[]; } /** - * A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + * A single record of the task status. */ - export interface Schema$GoogleCloudAiplatformV1beta1Part { + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailPipelineTaskStatus { /** - * Optional. URI based data. + * Output only. The error that occurred during the state. May be set when the state is any of the non-final state (PENDING/RUNNING/CANCELLING) or FAILED state. If the state is FAILED, the error here is final and not going to be retried. If the state is a non-final state, the error indicates a system-error being retried. */ - fileData?: Schema$GoogleCloudAiplatformV1beta1FileData; + error?: Schema$GoogleRpcStatus; /** - * Optional. 
A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + * Output only. The state of the task. */ - functionCall?: Schema$GoogleCloudAiplatformV1beta1FunctionCall; + state?: string | null; /** - * Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + * Output only. Update time of this status. */ - functionResponse?: Schema$GoogleCloudAiplatformV1beta1FunctionResponse; + updateTime?: string | null; + } + /** + * The runtime detail of a pipeline executor. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetail { /** - * Optional. Inlined bytes data. + * Output only. The detailed info for a container executor. */ - inlineData?: Schema$GoogleCloudAiplatformV1beta1Blob; + containerDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailContainerDetail; /** - * Optional. Text part (can be code). + * Output only. The detailed info for a custom job executor. */ - text?: string | null; - /** - * Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. - */ - videoMetadata?: Schema$GoogleCloudAiplatformV1beta1VideoMetadata; - } - /** - * Request message for JobService.PauseModelDeploymentMonitoringJob. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PauseModelDeploymentMonitoringJobRequest {} - /** - * Request message for ScheduleService.PauseSchedule. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PauseScheduleRequest {} - /** - * Represents the spec of persistent disk options. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PersistentDiskSpec { - /** - * Size in GB of the disk (default is 100GB). 
- */ - diskSizeGb?: string | null; - /** - * Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) - */ - diskType?: string | null; + customJobDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailCustomJobDetail; } /** - * Represents long-lasting resources that are dedicated to users to runs custom workloads. A PersistentResource can have multiple node pools and each node pool can have its own machine spec. + * The detail of a container execution. It contains the job names of the lifecycle of a container execution. */ - export interface Schema$GoogleCloudAiplatformV1beta1PersistentResource { + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailContainerDetail { /** - * Output only. Time when the PersistentResource was created. + * Output only. The names of the previously failed CustomJob for the main container executions. The list includes the all attempts in chronological order. */ - createTime?: string | null; + failedMainJobs?: string[] | null; /** - * Optional. The display name of the PersistentResource. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Output only. The names of the previously failed CustomJob for the pre-caching-check container executions. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. The list includes the all attempts in chronological order. */ - displayName?: string | null; + failedPreCachingCheckJobs?: string[] | null; /** - * Optional. Customer-managed encryption key spec for a PersistentResource. If set, this PersistentResource and all sub-resources of this PersistentResource will be secured by this key. + * Output only. The name of the CustomJob for the main container execution. 
*/ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + mainJob?: string | null; /** - * Output only. Only populated when persistent resource's state is `STOPPING` or `ERROR`. + * Output only. The name of the CustomJob for the pre-caching-check container execution. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. */ - error?: Schema$GoogleRpcStatus; + preCachingCheckJob?: string | null; + } + /** + * The detailed info for a custom job executor. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailCustomJobDetail { /** - * Optional. The labels with user-defined metadata to organize PersistentResource. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Output only. The names of the previously failed CustomJob. The list includes the all attempts in chronological order. */ - labels?: {[key: string]: string} | null; + failedJobs?: string[] | null; /** - * Immutable. Resource name of a PersistentResource. + * Output only. The name of the CustomJob. */ - name?: string | null; + job?: string | null; + } + /** + * Pipeline template metadata if PipelineJob.template_uri is from supported template registry. Currently, the only supported registry is Artifact Registry. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PipelineTemplateMetadata { /** - * Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to peered with Vertex AI to host the persistent resources. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. 
Where {project\} is a project number, as in `12345`, and {network\} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the resources aren't peered with any network. + * The version_name in artifact registry. Will always be presented in output if the PipelineJob.template_uri is from supported template registry. Format is "sha256:abcdef123456...". */ - network?: string | null; + version?: string | null; + } + /** + * Represents a network port in a container. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Port { /** - * Optional. A list of names for the reserved IP ranges under the VPC network that can be used for this persistent resource. If set, we will deploy the persistent resource within the provided IP ranges. Otherwise, the persistent resource is deployed to any IP ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. + * The number of the port to expose on the pod's IP address. Must be a valid port number, between 1 and 65535 inclusive. */ - reservedIpRanges?: string[] | null; + containerPort?: number | null; + } + /** + * Assigns input data to training, validation, and test sets based on the value of a provided key. Supported only for tabular Datasets. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PredefinedSplit { /** - * Required. The spec of the pools of different resources. + * Required. The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {`training`, `validation`, `test`\}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. 
*/ - resourcePools?: Schema$GoogleCloudAiplatformV1beta1ResourcePool[]; + key?: string | null; + } + /** + * Request message for PredictionService.Predict. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PredictRequest { /** - * Output only. Runtime information of the Persistent Resource. + * Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the prediction call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. */ - resourceRuntime?: Schema$GoogleCloudAiplatformV1beta1ResourceRuntime; + instances?: any[] | null; /** - * Optional. Persistent Resource runtime spec. For example, used for Ray cluster configuration. + * The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri. */ - resourceRuntimeSpec?: Schema$GoogleCloudAiplatformV1beta1ResourceRuntimeSpec; + parameters?: any | null; + } + /** + * Configuration for logging request-response to a BigQuery table. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PredictRequestResponseLoggingConfig { /** - * Output only. Time when the PersistentResource for the first time entered the `RUNNING` state. + * BigQuery table for logging. If only given a project, a new dataset will be created with name `logging__` where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores). If no table name is given, a new table will be created with name `request_response_logging` */ - startTime?: string | null; + bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; /** - * Output only. The detailed state of a Study. 
+ * If logging is enabled or not. */ - state?: string | null; + enabled?: boolean | null; /** - * Output only. Time when the PersistentResource was most recently updated. + * Percentage of requests to be logged, expressed as a fraction in range(0,1]. */ - updateTime?: string | null; + samplingRate?: number | null; } /** - * An instance of a machine learning PipelineJob. + * Response message for PredictionService.Predict. */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineJob { + export interface Schema$GoogleCloudAiplatformV1beta1PredictResponse { /** - * Output only. Pipeline creation time. + * ID of the Endpoint's DeployedModel that served this prediction. */ - createTime?: string | null; + deployedModelId?: string | null; /** - * The display name of the Pipeline. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Output only. Request-level metadata returned by the model. The metadata type will be dependent upon the model implementation. */ - displayName?: string | null; + metadata?: any | null; /** - * Customer-managed encryption key spec for a pipelineJob. If set, this PipelineJob and all of its sub-resources will be secured by this key. + * Output only. The resource name of the Model which is deployed as the DeployedModel that this prediction hits. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + model?: string | null; /** - * Output only. Pipeline end time. + * Output only. The display name of the Model which is deployed as the DeployedModel that this prediction hits. */ - endTime?: string | null; + modelDisplayName?: string | null; /** - * Output only. The error that occurred during pipeline execution. Only populated when the pipeline's state is FAILED or CANCELLED. + * Output only. The version ID of the Model which is deployed as the DeployedModel that this prediction hits. */ - error?: Schema$GoogleRpcStatus; + modelVersionId?: string | null; /** - * Output only. 
The details of pipeline run. Not available in the list view. + * The predictions that are the output of the predictions call. The schema of any single prediction may be specified via Endpoint's DeployedModels' Model's PredictSchemata's prediction_schema_uri. */ - jobDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineJobDetail; + predictions?: any[] | null; + } + /** + * Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PredictSchemata { /** - * The labels with user-defined metadata to organize PipelineJob. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. Note there is some reserved label key for Vertex AI Pipelines. - `vertex-ai-pipelines-run-billing-id`, user set value will get overrided. + * Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in PredictRequest.instances, ExplainRequest.instances and BatchPredictionJob.input_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - labels?: {[key: string]: string} | null; + instanceSchemaUri?: string | null; /** - * Output only. The resource name of the PipelineJob. + * Immutable. 
Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via PredictRequest.parameters, ExplainRequest.parameters and BatchPredictionJob.model_parameters. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no parameters are supported, then it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - name?: string | null; + parametersSchemaUri?: string | null; /** - * The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project\}/global/networks/{network\}`. Where {project\} is a project number, as in `12345`, and {network\} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. + * Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via PredictResponse.predictions, ExplainResponse.explanations, and BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. 
Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - network?: string | null; + predictionSchemaUri?: string | null; + } + /** + * Preset configuration for example-based explanations + */ + export interface Schema$GoogleCloudAiplatformV1beta1Presets { /** - * The spec of the pipeline. + * The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. */ - pipelineSpec?: {[key: string]: any} | null; + modality?: string | null; /** - * Optional. Whether to do component level validations before job creation. + * Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. */ - preflightValidations?: boolean | null; + query?: string | null; + } + /** + * PrivateEndpoints proto is used to provide paths for users to send requests privately. To send request via private service access, use predict_http_uri, explain_http_uri or health_http_uri. To send request via private service connect, use service_attachment. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PrivateEndpoints { /** - * A list of names for the reserved ip ranges under the VPC network that can be used for this Pipeline Job's workload. If set, we will deploy the Pipeline Job's workload within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. + * Output only. Http(s) path to send explain requests. */ - reservedIpRanges?: string[] | null; + explainHttpUri?: string | null; /** - * Runtime config of the pipeline. + * Output only. Http(s) path to send health check requests. 
*/ - runtimeConfig?: Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfig; + healthHttpUri?: string | null; /** - * Output only. The schedule resource name. Only returned if the Pipeline is created by Schedule API. + * Output only. Http(s) path to send prediction requests. */ - scheduleName?: string | null; + predictHttpUri?: string | null; /** - * The service account that the pipeline workload runs as. If not specified, the Compute Engine default service account in the project will be used. See https://cloud.google.com/compute/docs/access/service-accounts#default_service_account Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account. + * Output only. The name of the service attachment resource. Populated if private service connect is enabled. */ - serviceAccount?: string | null; + serviceAttachment?: string | null; + } + /** + * Represents configuration for private service connect. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig { /** - * Output only. Pipeline start time. + * Required. If true, expose the IndexEndpoint via private service connect. */ - startTime?: string | null; + enablePrivateServiceConnect?: boolean | null; /** - * Output only. The detailed state of the job. + * A list of Projects from which the forwarding rule will target the service attachment. */ - state?: string | null; + projectAllowlist?: string[] | null; + } + /** + * Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Probe { /** - * Output only. Pipeline template metadata. Will fill up fields if PipelineJob.template_uri is from supported template registry. + * Exec specifies the action to take. 
*/ - templateMetadata?: Schema$GoogleCloudAiplatformV1beta1PipelineTemplateMetadata; + exec?: Schema$GoogleCloudAiplatformV1beta1ProbeExecAction; /** - * A template uri from where the PipelineJob.pipeline_spec, if empty, will be downloaded. Currently, only uri from Vertex Template Registry & Gallery is supported. Reference to https://cloud.google.com/vertex-ai/docs/pipelines/create-pipeline-template. + * How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Must be less than timeout_seconds. Maps to Kubernetes probe argument 'periodSeconds'. */ - templateUri?: string | null; + periodSeconds?: number | null; /** - * Output only. Timestamp when this PipelineJob was most recently updated. + * Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Must be greater or equal to period_seconds. Maps to Kubernetes probe argument 'timeoutSeconds'. */ - updateTime?: string | null; + timeoutSeconds?: number | null; } /** - * The runtime detail of PipelineJob. + * ExecAction specifies a command to execute. */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineJobDetail { - /** - * Output only. The context of the pipeline. - */ - pipelineContext?: Schema$GoogleCloudAiplatformV1beta1Context; - /** - * Output only. The context of the current pipeline run. - */ - pipelineRunContext?: Schema$GoogleCloudAiplatformV1beta1Context; + export interface Schema$GoogleCloudAiplatformV1beta1ProbeExecAction { /** - * Output only. The runtime details of the tasks under the pipeline. + * Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
*/ - taskDetails?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetail[]; + command?: string[] | null; } /** - * The runtime config of a PipelineJob. + * PscAutomatedEndpoints defines the output of the forwarding rule automatically created by each PscAutomationConfig. */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfig { + export interface Schema$GoogleCloudAiplatformV1beta1PscAutomatedEndpoints { /** - * Represents the failure policy of a pipeline. Currently, the default of a pipeline is that the pipeline will continue to run until no more tasks can be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new tasks when a task has failed. Any scheduled tasks will continue to completion. + * Ip Address created by the automated forwarding rule. */ - failurePolicy?: string | null; + matchAddress?: string | null; /** - * Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is used by the system to generate the paths of output artifacts. The artifact paths are generated with a sub-path pattern `{job_id\}/{task_id\}/{output_key\}` under the specified output directory. The service account specified in this pipeline must have the `storage.objects.get` and `storage.objects.create` permissions for this bucket. + * Corresponding network in pscAutomationConfigs. */ - gcsOutputDirectory?: string | null; + network?: string | null; /** - * The runtime artifacts of the PipelineJob. The key will be the input artifact name and the value would be one of the InputArtifact. + * Corresponding project_id in pscAutomationConfigs */ - inputArtifacts?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigInputArtifact; - } | null; + projectId?: string | null; + } + /** + * A Model Garden Publisher Model. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModel { /** - * Deprecated. Use RuntimeConfig.parameter_values instead. The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as pipelines built using Kubeflow Pipelines SDK 1.8 or lower. + * Optional. Additional information about the model's Frameworks. */ - parameters?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1Value; - } | null; + frameworks?: string[] | null; /** - * The runtime parameters of the PipelineJob. The parameters will be passed into PipelineJob.pipeline_spec to replace the placeholders at runtime. This field is used by pipelines built using `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL. + * Optional. Indicates the launch stage of the model. */ - parameterValues?: {[key: string]: any} | null; - } - /** - * The type of an input artifact. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineJobRuntimeConfigInputArtifact { + launchStage?: string | null; /** - * Artifact resource id from MLMD. Which is the last portion of an artifact resource name: `projects/{project\}/locations/{location\}/metadataStores/default/artifacts/{artifact_id\}`. The artifact must stay within the same project, location and default metadatastore as the pipeline. + * Output only. The resource name of the PublisherModel. */ - artifactId?: string | null; + name?: string | null; + /** + * Required. Indicates the open source category of the publisher model. + */ + openSourceCategory?: string | null; + /** + * Optional. The parent that this model was customized from. E.g., Vision API, Natural Language API, LaMDA, T5, etc. Foundation models don't have parents. 
+ */ + parent?: Schema$GoogleCloudAiplatformV1beta1PublisherModelParent; + /** + * Optional. The schemata that describes formats of the PublisherModel's predictions and explanations as given and returned via PredictionService.Predict. + */ + predictSchemata?: Schema$GoogleCloudAiplatformV1beta1PredictSchemata; + /** + * Optional. Output only. Immutable. Used to indicate this model has a publisher model and provide the template of the publisher model resource name. + */ + publisherModelTemplate?: string | null; + /** + * Optional. Supported call-to-action options. + */ + supportedActions?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToAction; + /** + * Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation. + */ + versionId?: string | null; + /** + * Optional. Indicates the state of the model version. + */ + versionState?: string | null; } /** - * The runtime detail of a task execution. + * Actions could take on this Publisher Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetail { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToAction { /** - * Output only. Task create time. + * Optional. Create application using the PublisherModel. */ - createTime?: string | null; + createApplication?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. Task end time. + * Optional. Deploy the PublisherModel to Vertex Endpoint. */ - endTime?: string | null; + deploy?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeploy; /** - * Output only. The error that occurred during task execution. Only populated when the task's state is FAILED or CANCELLED. + * Optional. Deploy PublisherModel to Google Kubernetes Engine. 
*/ - error?: Schema$GoogleRpcStatus; + deployGke?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeployGke; /** - * Output only. The execution metadata of the task. + * Optional. Fine tune the PublisherModel with the third-party model tuning UI. */ - execution?: Schema$GoogleCloudAiplatformV1beta1Execution; + fineTune?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. The detailed execution info. + * Optional. Open evaluation pipeline of the PublisherModel. */ - executorDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetail; + openEvaluationPipeline?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. The runtime input artifacts of the task. + * Optional. Open fine-tuning pipeline of the PublisherModel. */ - inputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailArtifactList; - } | null; + openFineTuningPipeline?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. The runtime output artifacts of the task. + * Optional. Open fine-tuning pipelines of the PublisherModel. */ - outputs?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailArtifactList; - } | null; + openFineTuningPipelines?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenFineTuningPipelines; /** - * Output only. The id of the parent task if the task is within a component scope. Empty if the task is at the root level. + * Optional. Open in Generation AI Studio. */ - parentTaskId?: string | null; + openGenerationAiStudio?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. A list of task status. This field keeps a record of task status evolving over time. + * Optional. Open Genie / Playground. 
*/ - pipelineTaskStatus?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailPipelineTaskStatus[]; + openGenie?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. Task start time. + * Optional. Open notebook of the PublisherModel. */ - startTime?: string | null; + openNotebook?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. State of the task. + * Optional. Open notebooks of the PublisherModel. */ - state?: string | null; + openNotebooks?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenNotebooks; /** - * Output only. The system generated ID of the task. + * Optional. Open prompt-tuning pipeline of the PublisherModel. */ - taskId?: string | null; + openPromptTuningPipeline?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. The user specified name of the task that is defined in pipeline_spec. + * Optional. Request for access. */ - taskName?: string | null; - } - /** - * A list of artifact metadata. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailArtifactList { + requestAccess?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; /** - * Output only. A list of artifact metadata. + * Optional. To view Rest API docs. */ - artifacts?: Schema$GoogleCloudAiplatformV1beta1Artifact[]; + viewRestApi?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionViewRestApi; } /** - * A single record of the task status. + * Model metadata that is needed for UploadModel or DeployModel/CreateEndpoint requests. */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskDetailPipelineTaskStatus { - /** - * Output only. The error that occurred during the state. May be set when the state is any of the non-final state (PENDING/RUNNING/CANCELLING) or FAILED state. 
If the state is FAILED, the error here is final and not going to be retried. If the state is a non-final state, the error indicates a system-error being retried. - */ - error?: Schema$GoogleRpcStatus; + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeploy { /** - * Output only. The state of the task. + * Optional. The path to the directory containing the Model artifact and any of its supporting files. */ - state?: string | null; + artifactUri?: string | null; /** - * Output only. Update time of this status. + * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. */ - updateTime?: string | null; - } - /** - * The runtime detail of a pipeline executor. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetail { + automaticResources?: Schema$GoogleCloudAiplatformV1beta1AutomaticResources; /** - * Output only. The detailed info for a container executor. + * Optional. The specification of the container that is to be used when deploying this Model in Vertex AI. Not present for Large Models. */ - containerDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailContainerDetail; + containerSpec?: Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec; /** - * Output only. The detailed info for a custom job executor. + * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. */ - customJobDetail?: Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailCustomJobDetail; - } - /** - * The detail of a container execution. It contains the job names of the lifecycle of a container execution. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailContainerDetail { + dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; /** - * Output only. 
The names of the previously failed CustomJob for the main container executions. The list includes the all attempts in chronological order. + * Optional. The name of the deploy task (e.g., "text to image generation"). */ - failedMainJobs?: string[] | null; + deployTaskName?: string | null; /** - * Output only. The names of the previously failed CustomJob for the pre-caching-check container executions. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. The list includes the all attempts in chronological order. + * Optional. Large model reference. When this is set, model_artifact_spec is not needed. */ - failedPreCachingCheckJobs?: string[] | null; + largeModelReference?: Schema$GoogleCloudAiplatformV1beta1LargeModelReference; /** - * Output only. The name of the CustomJob for the main container execution. + * Optional. Default model display name. */ - mainJob?: string | null; + modelDisplayName?: string | null; /** - * Output only. The name of the CustomJob for the pre-caching-check container execution. This job will be available if the PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the lifecycle events. + * Optional. The signed URI for ephemeral Cloud Storage access to model artifact. */ - preCachingCheckJob?: string | null; - } - /** - * The detailed info for a custom job executor. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTaskExecutorDetailCustomJobDetail { + publicArtifactUri?: string | null; /** - * Output only. The names of the previously failed CustomJob. The list includes the all attempts in chronological order. + * The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` */ - failedJobs?: string[] | null; + sharedResources?: string | null; /** - * Output only. The name of the CustomJob. + * Required. 
The title of the regional resource reference. */ - job?: string | null; + title?: string | null; } /** - * Pipeline template metadata if PipelineJob.template_uri is from supported template registry. Currently, the only supported registry is Artifact Registry. + * Configurations for PublisherModel GKE deployment */ - export interface Schema$GoogleCloudAiplatformV1beta1PipelineTemplateMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeployGke { /** - * The version_name in artifact registry. Will always be presented in output if the PipelineJob.template_uri is from supported template registry. Format is "sha256:abcdef123456...". + * Optional. GKE deployment configuration in yaml format. */ - version?: string | null; + gkeYamlConfigs?: string[] | null; } /** - * Represents a network port in a container. + * Open fine tuning pipelines. */ - export interface Schema$GoogleCloudAiplatformV1beta1Port { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenFineTuningPipelines { /** - * The number of the port to expose on the pod's IP address. Must be a valid port number, between 1 and 65535 inclusive. + * Required. Regional resource references to fine tuning pipelines. */ - containerPort?: number | null; + fineTuningPipelines?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences[]; } /** - * Assigns input data to training, validation, and test sets based on the value of a provided key. Supported only for tabular Datasets. + * Open notebooks. */ - export interface Schema$GoogleCloudAiplatformV1beta1PredefinedSplit { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenNotebooks { /** - * Required. The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {`training`, `validation`, `test`\}, and it defines to which set the given piece of data is assigned. 
If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. + * Required. Regional resource references to notebooks. */ - key?: string | null; + notebooks?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences[]; } /** - * Request message for PredictionService.Predict. + * The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. */ - export interface Schema$GoogleCloudAiplatformV1beta1PredictRequest { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences { /** - * Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit on the number of instances it supports per request, and when it is exceeded the prediction call errors in case of AutoML Models, or, in case of customer created Models, the behaviour is as documented by that Model. The schema of any single instance may be specified via Endpoint's DeployedModels' Model's PredictSchemata's instance_schema_uri. + * Required. */ - instances?: any[] | null; + references?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1PublisherModelResourceReference; + } | null; /** - * The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri. + * Optional. Description of the resource. */ - parameters?: any | null; - } - /** - * Configuration for logging request-response to a BigQuery table. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PredictRequestResponseLoggingConfig { + resourceDescription?: string | null; /** - * BigQuery table for logging. If only given a project, a new dataset will be created with name `logging__` where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores). 
If no table name is given, a new table will be created with name `request_response_logging` + * Optional. Title of the resource. */ - bigqueryDestination?: Schema$GoogleCloudAiplatformV1beta1BigQueryDestination; + resourceTitle?: string | null; /** - * If logging is enabled or not. + * Optional. Use case (CUJ) of the resource. */ - enabled?: boolean | null; + resourceUseCase?: string | null; /** - * Percentage of requests to be logged, expressed as a fraction in range(0,1]. + * Required. */ - samplingRate?: number | null; + title?: string | null; } /** - * Response message for PredictionService.Predict. + * Rest API docs. */ - export interface Schema$GoogleCloudAiplatformV1beta1PredictResponse { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionViewRestApi { /** - * ID of the Endpoint's DeployedModel that served this prediction. + * Required. */ - deployedModelId?: string | null; + documentations?: Schema$GoogleCloudAiplatformV1beta1PublisherModelDocumentation[]; /** - * Output only. Request-level metadata returned by the model. The metadata type will be dependent upon the model implementation. + * Required. The title of the view rest API. */ - metadata?: any | null; + title?: string | null; + } + /** + * A named piece of documentation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelDocumentation { /** - * Output only. The resource name of the Model which is deployed as the DeployedModel that this prediction hits. + * Required. Content of this piece of document (in Markdown format). */ - model?: string | null; + content?: string | null; /** - * Output only. The display name of the Model which is deployed as the DeployedModel that this prediction hits. + * Required. E.g., OVERVIEW, USE CASES, DOCUMENTATION, SDK & SAMPLES, JAVA, NODE.JS, etc.. */ - modelDisplayName?: string | null; + title?: string | null; + } + /** + * The information about the parent of a model. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelParent { /** - * Output only. The version ID of the Model which is deployed as the DeployedModel that this prediction hits. + * Required. The display name of the parent. E.g., LaMDA, T5, Vision API, Natural Language API. */ - modelVersionId?: string | null; + displayName?: string | null; /** - * The predictions that are the output of the predictions call. The schema of any single prediction may be specified via Endpoint's DeployedModels' Model's PredictSchemata's prediction_schema_uri. + * Optional. The Google Cloud resource name or the URI reference. */ - predictions?: any[] | null; + reference?: Schema$GoogleCloudAiplatformV1beta1PublisherModelResourceReference; } /** - * Contains the schemata used in Model's predictions and explanations via PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. + * Reference to a resource. */ - export interface Schema$GoogleCloudAiplatformV1beta1PredictSchemata { + export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelResourceReference { /** - * Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in PredictRequest.instances, ExplainRequest.instances and BatchPredictionJob.input_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * Description of the resource. */ - instanceSchemaUri?: string | null; + description?: string | null; /** - * Immutable. 
Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via PredictRequest.parameters, ExplainRequest.parameters and BatchPredictionJob.model_parameters. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no parameters are supported, then it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * The resource name of the Google Cloud resource. */ - parametersSchemaUri?: string | null; + resourceName?: string | null; /** - * Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via PredictResponse.predictions, ExplainResponse.explanations, and BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * The URI of the resource. */ - predictionSchemaUri?: string | null; + uri?: string | null; + /** + * Use case (CUJ) of the resource. + */ + useCase?: string | null; } /** - * Preset configuration for example-based explanations + * Details of operations that perform MetadataService.PurgeArtifacts. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1Presets { - /** - * The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type. - */ - modality?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1PurgeArtifactsMetadata { /** - * Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`. + * Operation metadata for purging Artifacts. */ - query?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * PrivateEndpoints proto is used to provide paths for users to send requests privately. To send request via private service access, use predict_http_uri, explain_http_uri or health_http_uri. To send request via private service connect, use service_attachment. + * Request message for MetadataService.PurgeArtifacts. */ - export interface Schema$GoogleCloudAiplatformV1beta1PrivateEndpoints { + export interface Schema$GoogleCloudAiplatformV1beta1PurgeArtifactsRequest { /** - * Output only. Http(s) path to send explain requests. + * Required. A required filter matching the Artifacts to be purged. E.g., `update_time <= 2020-11-19T11:30:00-04:00`. */ - explainHttpUri?: string | null; + filter?: string | null; /** - * Output only. Http(s) path to send health check requests. + * Optional. Flag to indicate to actually perform the purge. If `force` is set to false, the method will return a sample of Artifact names that would be deleted. */ - healthHttpUri?: string | null; + force?: boolean | null; + } + /** + * Response message for MetadataService.PurgeArtifacts. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PurgeArtifactsResponse { /** - * Output only. Http(s) path to send prediction requests. 
+ * The number of Artifacts that this request deleted (or, if `force` is false, the number of Artifacts that will be deleted). This can be an estimate. */ - predictHttpUri?: string | null; + purgeCount?: string | null; /** - * Output only. The name of the service attachment resource. Populated if private service connect is enabled. + * A sample of the Artifact names that will be deleted. Only populated if `force` is set to false. The maximum number of samples is 100 (it is possible to return fewer). */ - serviceAttachment?: string | null; + purgeSample?: string[] | null; } /** - * Represents configuration for private service connect. + * Details of operations that perform MetadataService.PurgeContexts. */ - export interface Schema$GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig { - /** - * Required. If true, expose the IndexEndpoint via private service connect. - */ - enablePrivateServiceConnect?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1PurgeContextsMetadata { /** - * A list of Projects from which the forwarding rule will target the service attachment. + * Operation metadata for purging Contexts. */ - projectAllowlist?: string[] | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + * Request message for MetadataService.PurgeContexts. */ - export interface Schema$GoogleCloudAiplatformV1beta1Probe { + export interface Schema$GoogleCloudAiplatformV1beta1PurgeContextsRequest { /** - * Exec specifies the action to take. + * Required. A required filter matching the Contexts to be purged. E.g., `update_time <= 2020-11-19T11:30:00-04:00`. */ - exec?: Schema$GoogleCloudAiplatformV1beta1ProbeExecAction; + filter?: string | null; /** - * How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Must be less than timeout_seconds. 
Maps to Kubernetes probe argument 'periodSeconds'. + * Optional. Flag to indicate to actually perform the purge. If `force` is set to false, the method will return a sample of Context names that would be deleted. */ - periodSeconds?: number | null; + force?: boolean | null; + } + /** + * Response message for MetadataService.PurgeContexts. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PurgeContextsResponse { /** - * Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Must be greater or equal to period_seconds. Maps to Kubernetes probe argument 'timeoutSeconds'. + * The number of Contexts that this request deleted (or, if `force` is false, the number of Contexts that will be deleted). This can be an estimate. */ - timeoutSeconds?: number | null; + purgeCount?: string | null; + /** + * A sample of the Context names that will be deleted. Only populated if `force` is set to false. The maximum number of samples is 100 (it is possible to return fewer). + */ + purgeSample?: string[] | null; } /** - * ExecAction specifies a command to execute. + * Details of operations that perform MetadataService.PurgeExecutions. */ - export interface Schema$GoogleCloudAiplatformV1beta1ProbeExecAction { + export interface Schema$GoogleCloudAiplatformV1beta1PurgeExecutionsMetadata { /** - * Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + * Operation metadata for purging Executions. 
*/ - command?: string[] | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * PscAutomatedEndpoints defines the output of the forwarding rule automatically created by each PscAutomationConfig. + * Request message for MetadataService.PurgeExecutions. */ - export interface Schema$GoogleCloudAiplatformV1beta1PscAutomatedEndpoints { + export interface Schema$GoogleCloudAiplatformV1beta1PurgeExecutionsRequest { /** - * Ip Address created by the automated forwarding rule. + * Required. A required filter matching the Executions to be purged. E.g., `update_time <= 2020-11-19T11:30:00-04:00`. */ - matchAddress?: string | null; + filter?: string | null; /** - * Corresponding network in pscAutomationConfigs. + * Optional. Flag to indicate to actually perform the purge. If `force` is set to false, the method will return a sample of Execution names that would be deleted. */ - network?: string | null; + force?: boolean | null; + } + /** + * Response message for MetadataService.PurgeExecutions. + */ + export interface Schema$GoogleCloudAiplatformV1beta1PurgeExecutionsResponse { /** - * Corresponding project_id in pscAutomationConfigs + * The number of Executions that this request deleted (or, if `force` is false, the number of Executions that will be deleted). This can be an estimate. */ - projectId?: string | null; + purgeCount?: string | null; + /** + * A sample of the Execution names that will be deleted. Only populated if `force` is set to false. The maximum number of samples is 100 (it is possible to return fewer). + */ + purgeSample?: string[] | null; } /** - * A Model Garden Publisher Model. + * The spec of a Python packaged code. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModel { + export interface Schema$GoogleCloudAiplatformV1beta1PythonPackageSpec { /** - * Optional. Additional information about the model's Frameworks. + * Command line arguments to be passed to the Python task. 
*/ - frameworks?: string[] | null; + args?: string[] | null; /** - * Optional. Indicates the launch stage of the model. + * Environment variables to be passed to the python module. Maximum limit is 100. */ - launchStage?: string | null; + env?: Schema$GoogleCloudAiplatformV1beta1EnvVar[]; /** - * Output only. The resource name of the PublisherModel. + * Required. The URI of a container image in Artifact Registry that will run the provided Python package. Vertex AI provides a wide range of executor images with pre-installed packages to meet users' various use cases. See the list of [pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). You must use an image from this list. */ - name?: string | null; + executorImageUri?: string | null; /** - * Required. Indicates the open source category of the publisher model. + * Required. The Google Cloud Storage location of the Python package files which are the training program and its dependent packages. The maximum number of package URIs is 100. */ - openSourceCategory?: string | null; + packageUris?: string[] | null; /** - * Optional. The parent that this model was customized from. E.g., Vision API, Natural Language API, LaMDA, T5, etc. Foundation models don't have parents. + * Required. The Python module name to run after installing the packages. */ - parent?: Schema$GoogleCloudAiplatformV1beta1PublisherModelParent; + pythonModule?: string | null; + } + /** + * Response message for QueryDeployedModels method. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QueryDeployedModelsResponse { /** - * Optional. The schemata that describes formats of the PublisherModel's predictions and explanations as given and returned via PredictionService.Predict. + * References to the DeployedModels that share the specified deploymentResourcePool. 
*/ - predictSchemata?: Schema$GoogleCloudAiplatformV1beta1PredictSchemata; + deployedModelRefs?: Schema$GoogleCloudAiplatformV1beta1DeployedModelRef[]; /** - * Optional. Output only. Immutable. Used to indicate this model has a publisher model and provide the template of the publisher model resource name. + * DEPRECATED Use deployed_model_refs instead. */ - publisherModelTemplate?: string | null; + deployedModels?: Schema$GoogleCloudAiplatformV1beta1DeployedModel[]; /** - * Optional. Supported call-to-action options. + * A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - supportedActions?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToAction; + nextPageToken?: string | null; /** - * Output only. Immutable. The version ID of the PublisherModel. A new version is committed when a new model version is uploaded under an existing model id. It is an auto-incrementing decimal number in string representation. + * The total number of DeployedModels on this DeploymentResourcePool. */ - versionId?: string | null; + totalDeployedModelCount?: number | null; /** - * Optional. Indicates the state of the model version. + * The total number of Endpoints that have DeployedModels on this DeploymentResourcePool. */ - versionState?: string | null; + totalEndpointCount?: number | null; } /** - * Actions could take on this Publisher Model. + * Request message for ExtensionExecutionService.QueryExtension. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToAction { + export interface Schema$GoogleCloudAiplatformV1beta1QueryExtensionRequest { /** - * Optional. Create application using the PublisherModel. + * Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. 
*/ - createApplication?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + contents?: Schema$GoogleCloudAiplatformV1beta1Content[]; + } + /** + * Response message for ExtensionExecutionService.QueryExtension. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QueryExtensionResponse { /** - * Optional. Deploy the PublisherModel to Vertex Endpoint. + * Failure message if any. */ - deploy?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeploy; + failureMessage?: string | null; /** - * Optional. Deploy PublisherModel to Google Kubernetes Engine. + * Steps of extension or LLM interaction, can contain function call, function response, or text response. The last step contains the final response to the query. */ - deployGke?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeployGke; + steps?: Schema$GoogleCloudAiplatformV1beta1Content[]; + } + /** + * Request message for ReasoningEngineExecutionService.Query. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QueryReasoningEngineRequest { /** - * Optional. Fine tune the PublisherModel with the third-party model tuning UI. + * Optional. Input content provided by users in JSON object format. Examples include text query, function calling parameters, media bytes, etc. */ - fineTune?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + input?: {[key: string]: any} | null; + } + /** + * Response message for ReasoningEngineExecutionService.Query + */ + export interface Schema$GoogleCloudAiplatformV1beta1QueryReasoningEngineResponse { /** - * Optional. Open evaluation pipeline of the PublisherModel. + * Response provided by users in JSON object format. */ - openEvaluationPipeline?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + output?: any | null; + } + /** + * Input for question answering correctness metric. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput { /** - * Optional. Open fine-tuning pipeline of the PublisherModel. + * Required. Question answering correctness instance. */ - openFineTuningPipeline?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance; /** - * Optional. Open fine-tuning pipelines of the PublisherModel. + * Required. Spec for question answering correctness score metric. */ - openFineTuningPipelines?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenFineTuningPipelines; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec; + } + /** + * Spec for question answering correctness instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance { /** - * Optional. Open in Generation AI Studio. + * Optional. Text provided as context to answer the question. */ - openGenerationAiStudio?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + context?: string | null; /** - * Optional. Open Genie / Playground. + * Required. The question asked and other instruction in the inference prompt. */ - openGenie?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + instruction?: string | null; /** - * Optional. Open notebook of the PublisherModel. + * Required. Output of the evaluated model. */ - openNotebook?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + prediction?: string | null; /** - * Optional. Open notebooks of the PublisherModel. + * Optional. Ground truth used to compare against the prediction. */ - openNotebooks?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenNotebooks; + reference?: string | null; + } + /** + * Spec for question answering correctness result. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult { /** - * Optional. Open prompt-tuning pipeline of the PublisherModel. + * Output only. Confidence for question answering correctness score. */ - openPromptTuningPipeline?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + confidence?: number | null; /** - * Optional. Request for access. + * Output only. Explanation for question answering correctness score. */ - requestAccess?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences; + explanation?: string | null; /** - * Optional. To view Rest API docs. + * Output only. Question Answering Correctness score. */ - viewRestApi?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionViewRestApi; + score?: number | null; } /** - * Model metadata that is needed for UploadModel or DeployModel/CreateEndpoint requests. + * Spec for question answering correctness metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeploy { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec { /** - * Optional. The path to the directory containing the Model artifact and any of its supporting files. + * Optional. Whether to use instance.reference to compute question answering correctness. */ - artifactUri?: string | null; + useReference?: boolean | null; /** - * A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration. + * Optional. Which version to use for evaluation. */ - automaticResources?: Schema$GoogleCloudAiplatformV1beta1AutomaticResources; + version?: number | null; + } + /** + * Input for question answering helpfulness metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput { /** - * Optional. 
The specification of the container that is to be used when deploying this Model in Vertex AI. Not present for Large Models. + * Required. Question answering helpfulness instance. */ - containerSpec?: Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec; + instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance; /** - * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. + * Required. Spec for question answering helpfulness score metric. */ - dedicatedResources?: Schema$GoogleCloudAiplatformV1beta1DedicatedResources; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec; + } + /** + * Spec for question answering helpfulness instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance { /** - * Optional. The name of the deploy task (e.g., "text to image generation"). + * Optional. Text provided as context to answer the question. */ - deployTaskName?: string | null; + context?: string | null; /** - * Optional. Large model reference. When this is set, model_artifact_spec is not needed. + * Required. The question asked and other instruction in the inference prompt. */ - largeModelReference?: Schema$GoogleCloudAiplatformV1beta1LargeModelReference; + instruction?: string | null; /** - * Optional. Default model display name. + * Required. Output of the evaluated model. */ - modelDisplayName?: string | null; + prediction?: string | null; /** - * Optional. The signed URI for ephemeral Cloud Storage access to model artifact. + * Optional. Ground truth used to compare against the prediction. */ - publicArtifactUri?: string | null; + reference?: string | null; + } + /** + * Spec for question answering helpfulness result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult { /** - * The resource name of the shared DeploymentResourcePool to deploy on. 
Format: `projects/{project\}/locations/{location\}/deploymentResourcePools/{deployment_resource_pool\}` + * Output only. Confidence for question answering helpfulness score. */ - sharedResources?: string | null; + confidence?: number | null; /** - * Required. The title of the regional resource reference. + * Output only. Explanation for question answering helpfulness score. */ - title?: string | null; - } - /** - * Configurations for PublisherModel GKE deployment - */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeployGke { + explanation?: string | null; /** - * Optional. GKE deployment configuration in yaml format. + * Output only. Question Answering Helpfulness score. */ - gkeYamlConfigs?: string[] | null; + score?: number | null; } /** - * Open fine tuning pipelines. + * Spec for question answering helpfulness metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenFineTuningPipelines { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec { /** - * Required. Regional resource references to fine tuning pipelines. + * Optional. Whether to use instance.reference to compute question answering helpfulness. */ - fineTuningPipelines?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences[]; + useReference?: boolean | null; + /** + * Optional. Which version to use for evaluation. + */ + version?: number | null; } /** - * Open notebooks. + * Input for question answering quality metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionOpenNotebooks { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput { /** - * Required. Regional resource references to notebooks. + * Required. Question answering quality instance. 
*/ - notebooks?: Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences[]; + instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance; + /** + * Required. Spec for question answering quality score metric. + */ + metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec; } /** - * The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. + * Spec for question answering quality instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences { - /** - * Required. - */ - references?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1PublisherModelResourceReference; - } | null; + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance { /** - * Optional. Description of the resource. + * Required. Text to answer the question. */ - resourceDescription?: string | null; + context?: string | null; /** - * Optional. Title of the resource. + * Required. Question Answering prompt for LLM. */ - resourceTitle?: string | null; + instruction?: string | null; /** - * Optional. Use case (CUJ) of the resource. + * Required. Output of the evaluated model. */ - resourceUseCase?: string | null; + prediction?: string | null; /** - * Required. + * Optional. Ground truth used to compare against the prediction. */ - title?: string | null; + reference?: string | null; } /** - * Rest API docs. + * Spec for question answering quality result. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelCallToActionViewRestApi { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult { /** - * Required. + * Output only. Confidence for question answering quality score. */ - documentations?: Schema$GoogleCloudAiplatformV1beta1PublisherModelDocumentation[]; + confidence?: number | null; /** - * Required. The title of the view rest API. + * Output only. 
Explanation for question answering quality score. */ - title?: string | null; + explanation?: string | null; + /** + * Output only. Question Answering Quality score. + */ + score?: number | null; } /** - * A named piece of documentation. + * Spec for question answering quality score metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelDocumentation { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec { /** - * Required. Content of this piece of document (in Markdown format). + * Optional. Whether to use instance.reference to compute question answering quality. */ - content?: string | null; + useReference?: boolean | null; /** - * Required. E.g., OVERVIEW, USE CASES, DOCUMENTATION, SDK & SAMPLES, JAVA, NODE.JS, etc.. + * Optional. Which version to use for evaluation. */ - title?: string | null; + version?: number | null; } /** - * The information about the parent of a model. + * Input for question answering relevance metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelParent { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput { /** - * Required. The display name of the parent. E.g., LaMDA, T5, Vision API, Natural Language API. + * Required. Question answering relevance instance. */ - displayName?: string | null; + instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance; /** - * Optional. The Google Cloud resource name or the URI reference. + * Required. Spec for question answering relevance score metric. */ - reference?: Schema$GoogleCloudAiplatformV1beta1PublisherModelResourceReference; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec; } /** - * Reference to a resource. + * Spec for question answering relevance instance. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1PublisherModelResourceReference { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance { /** - * Description of the resource. + * Optional. Text provided as context to answer the question. */ - description?: string | null; + context?: string | null; /** - * The resource name of the Google Cloud resource. + * Required. The question asked and other instruction in the inference prompt. */ - resourceName?: string | null; + instruction?: string | null; /** - * The URI of the resource. + * Required. Output of the evaluated model. */ - uri?: string | null; + prediction?: string | null; /** - * Use case (CUJ) of the resource. + * Optional. Ground truth used to compare against the prediction. */ - useCase?: string | null; + reference?: string | null; } /** - * Details of operations that perform MetadataService.PurgeArtifacts. + * Spec for question answering relevance result. */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeArtifactsMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult { /** - * Operation metadata for purging Artifacts. + * Output only. Confidence for question answering relevance score. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for MetadataService.PurgeArtifacts. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeArtifactsRequest { + confidence?: number | null; /** - * Required. A required filter matching the Artifacts to be purged. E.g., `update_time <= 2020-11-19T11:30:00-04:00`. + * Output only. Explanation for question answering relevance score. */ - filter?: string | null; + explanation?: string | null; /** - * Optional. Flag to indicate to actually perform the purge. If `force` is set to false, the method will return a sample of Artifact names that would be deleted. + * Output only. Question Answering Relevance score. 
*/ - force?: boolean | null; + score?: number | null; } /** - * Response message for MetadataService.PurgeArtifacts. + * Spec for question answering relevance metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeArtifactsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec { /** - * The number of Artifacts that this request deleted (or, if `force` is false, the number of Artifacts that will be deleted). This can be an estimate. + * Optional. Whether to use instance.reference to compute question answering relevance. */ - purgeCount?: string | null; + useReference?: boolean | null; /** - * A sample of the Artifact names that will be deleted. Only populated if `force` is set to false. The maximum number of samples is 100 (it is possible to return fewer). + * Optional. Which version to use for evaluation. */ - purgeSample?: string[] | null; + version?: number | null; } /** - * Details of operations that perform MetadataService.PurgeContexts. + * Relevant contexts for one query. */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeContextsMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1RagContexts { /** - * Operation metadata for purging Contexts. + * All its contexts. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + contexts?: Schema$GoogleCloudAiplatformV1beta1RagContextsContext[]; } /** - * Request message for MetadataService.PurgeContexts. + * A context of the query. */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeContextsRequest { - /** - * Required. A required filter matching the Contexts to be purged. E.g., `update_time <= 2020-11-19T11:30:00-04:00`. - */ - filter?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1RagContextsContext { /** - * Optional. Flag to indicate to actually perform the purge. If `force` is set to false, the method will return a sample of Context names that would be deleted. 
+ * The distance between the query vector and the context text vector. */ - force?: boolean | null; - } - /** - * Response message for MetadataService.PurgeContexts. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeContextsResponse { + distance?: number | null; /** - * The number of Contexts that this request deleted (or, if `force` is false, the number of Contexts that will be deleted). This can be an estimate. + * For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name. */ - purgeCount?: string | null; + sourceUri?: string | null; /** - * A sample of the Context names that will be deleted. Only populated if `force` is set to false. The maximum number of samples is 100 (it is possible to return fewer). + * The text chunk. */ - purgeSample?: string[] | null; + text?: string | null; } /** - * Details of operations that perform MetadataService.PurgeExecutions. + * A RagCorpus is a RagFile container and a project can have multiple RagCorpora. */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeExecutionsMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1RagCorpus { /** - * Operation metadata for purging Executions. + * Output only. Timestamp when this RagCorpus was created. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for MetadataService.PurgeExecutions. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeExecutionsRequest { + createTime?: string | null; /** - * Required. A required filter matching the Executions to be purged. E.g., `update_time <= 2020-11-19T11:30:00-04:00`. + * Optional. The description of the RagCorpus. */ - filter?: string | null; + description?: string | null; /** - * Optional. Flag to indicate to actually perform the purge. 
If `force` is set to false, the method will return a sample of Execution names that would be deleted. + * Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - force?: boolean | null; - } - /** - * Response message for MetadataService.PurgeExecutions. - */ - export interface Schema$GoogleCloudAiplatformV1beta1PurgeExecutionsResponse { + displayName?: string | null; /** - * The number of Executions that this request deleted (or, if `force` is false, the number of Executions that will be deleted). This can be an estimate. + * Output only. The resource name of the RagCorpus. */ - purgeCount?: string | null; + name?: string | null; /** - * A sample of the Execution names that will be deleted. Only populated if `force` is set to false. The maximum number of samples is 100 (it is possible to return fewer). + * Output only. Timestamp when this RagCorpus was last updated. */ - purgeSample?: string[] | null; + updateTime?: string | null; } /** - * The spec of a Python packaged code. + * A RagFile contains user data for chunking, embedding and indexing. */ - export interface Schema$GoogleCloudAiplatformV1beta1PythonPackageSpec { + export interface Schema$GoogleCloudAiplatformV1beta1RagFile { /** - * Command line arguments to be passed to the Python task. + * Output only. Timestamp when this RagFile was created. */ - args?: string[] | null; + createTime?: string | null; /** - * Environment variables to be passed to the python module. Maximum limit is 100. + * Optional. The description of the RagFile. */ - env?: Schema$GoogleCloudAiplatformV1beta1EnvVar[]; + description?: string | null; /** - * Required. The URI of a container image in Artifact Registry that will run the provided Python package. Vertex AI provides a wide range of executor images with pre-installed packages to meet users' various use cases. 
See the list of [pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). You must use an image from this list. + * Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request. */ - executorImageUri?: string | null; + directUploadSource?: Schema$GoogleCloudAiplatformV1beta1DirectUploadSource; /** - * Required. The Google Cloud Storage location of the Python package files which are the training program and its dependent packages. The maximum number of package URIs is 100. + * Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - packageUris?: string[] | null; + displayName?: string | null; /** - * Required. The Python module name to run after installing the packages. + * Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now. */ - pythonModule?: string | null; - } - /** - * Response message for QueryDeployedModels method. - */ - export interface Schema$GoogleCloudAiplatformV1beta1QueryDeployedModelsResponse { + gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; /** - * References to the DeployedModels that share the specified deploymentResourcePool. + * Output only. Google Drive location. Supports importing individual files as well as Google Drive folders. */ - deployedModelRefs?: Schema$GoogleCloudAiplatformV1beta1DeployedModelRef[]; + googleDriveSource?: Schema$GoogleCloudAiplatformV1beta1GoogleDriveSource; /** - * DEPRECATED Use deployed_model_refs instead. + * Output only. The resource name of the RagFile. */ - deployedModels?: Schema$GoogleCloudAiplatformV1beta1DeployedModel[]; + name?: string | null; /** - * A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. + * Output only. The type of the RagFile. 
*/ - nextPageToken?: string | null; + ragFileType?: string | null; /** - * The total number of DeployedModels on this DeploymentResourcePool. + * Output only. The size of the RagFile in bytes. */ - totalDeployedModelCount?: number | null; + sizeBytes?: string | null; /** - * The total number of Endpoints that have DeployedModels on this DeploymentResourcePool. + * Output only. Timestamp when this RagFile was last updated. */ - totalEndpointCount?: number | null; + updateTime?: string | null; } /** - * Request message for ExtensionExecutionService.QueryExtension. + * Specifies the size and overlap of chunks for RagFiles. */ - export interface Schema$GoogleCloudAiplatformV1beta1QueryExtensionRequest { + export interface Schema$GoogleCloudAiplatformV1beta1RagFileChunkingConfig { /** - * Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. + * The overlap between chunks. */ - contents?: Schema$GoogleCloudAiplatformV1beta1Content[]; + chunkOverlap?: number | null; + /** + * The size of the chunks. + */ + chunkSize?: number | null; } /** - * Response message for ExtensionExecutionService.QueryExtension. + * A query to retrieve relevant contexts. */ - export interface Schema$GoogleCloudAiplatformV1beta1QueryExtensionResponse { + export interface Schema$GoogleCloudAiplatformV1beta1RagQuery { /** - * Failure message if any. + * Optional. The number of contexts to retrieve. */ - failureMessage?: string | null; + similarityTopK?: number | null; /** - * Steps of extension or LLM interaction, can contain function call, function response, or text response. The last step contains the final response to the query. + * Optional. The query in text format to get relevant contexts. 
*/ - steps?: Schema$GoogleCloudAiplatformV1beta1Content[]; + text?: string | null; } /** - * Request message for ReasoningEngineExecutionService.Query. + * Request message for PredictionService.RawPredict. */ - export interface Schema$GoogleCloudAiplatformV1beta1QueryReasoningEngineRequest { + export interface Schema$GoogleCloudAiplatformV1beta1RawPredictRequest { /** - * Optional. Input content provided by users in JSON object format. Examples include text query, function calling parameters, media bytes, etc. + * The prediction input. Supports HTTP headers and arbitrary data payload. A DeployedModel may have an upper limit on the number of instances it supports per request. When this limit it is exceeded for an AutoML model, the RawPredict method returns an error. When this limit is exceeded for a custom-trained model, the behavior varies depending on the model. You can specify the schema for each instance in the predict_schemata.instance_schema_uri field when you create a Model. This schema applies when you deploy the `Model` as a `DeployedModel` to an Endpoint and use the `RawPredict` method. */ - input?: {[key: string]: any} | null; + httpBody?: Schema$GoogleApiHttpBody; } /** - * Response message for ReasoningEngineExecutionService.Query + * Configuration for the Ray metrics. */ - export interface Schema$GoogleCloudAiplatformV1beta1QueryReasoningEngineResponse { + export interface Schema$GoogleCloudAiplatformV1beta1RayMetricSpec { /** - * Response provided by users in JSON object format. + * Optional. Flag to disable the Ray metrics collection. */ - output?: any | null; + disabled?: boolean | null; } /** - * Input for question answering correctness metric. + * Configuration information for the Ray cluster. For experimental launch, Ray cluster creation and Persistent cluster creation are 1:1 mapping: We will provision all the nodes within the Persistent cluster as Ray nodes. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput { + export interface Schema$GoogleCloudAiplatformV1beta1RaySpec { /** - * Required. Question answering correctness instance. + * Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). Will use the machine from the first workerpool as the head node by default if this field isn't set. */ - instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance; + headNodeResourcePoolId?: string | null; /** - * Required. Spec for question answering correctness score metric. + * Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images\} field. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec; - } - /** - * Spec for question answering correctness instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance { + imageUri?: string | null; /** - * Optional. Text provided as context to answer the question. + * Optional. Ray metrics configurations. */ - context?: string | null; + rayMetricSpec?: Schema$GoogleCloudAiplatformV1beta1RayMetricSpec; /** - * Required. The question asked and other instruction in the inference prompt. + * Optional. Required if image_uri isn't set. A map of resource_pool_id to prebuild Ray image if user need to use different images for different head/worker pools. This map needs to cover all the resource pool ids. 
Example: { "ray_head_node_pool": "head image" "ray_worker_node_pool1": "worker image" "ray_worker_node_pool2": "another worker image" \} */ - instruction?: string | null; + resourcePoolImages?: {[key: string]: string} | null; + } + /** + * Request message for FeaturestoreOnlineServingService.ReadFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesRequest { /** - * Required. Output of the evaluated model. + * Required. ID for a specific entity. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. */ - prediction?: string | null; + entityId?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * Required. Selector choosing Features of the target EntityType. */ - reference?: string | null; + featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; } /** - * Spec for question answering correctness result. + * Response message for FeaturestoreOnlineServingService.ReadFeatureValues. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult { - /** - * Output only. Confidence for question answering correctness score. - */ - confidence?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponse { /** - * Output only. Explanation for question answering correctness score. + * Entity view with Feature values. This may be the entity in the Featurestore if values for all Features were requested, or a projection of the entity in the Featurestore if values for only some Features were requested. */ - explanation?: string | null; + entityView?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityView; /** - * Output only. Question Answering Correctness score. + * Response header. */ - score?: number | null; + header?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseHeader; } /** - * Spec for question answering correctness metric. 
+ * Entity view with Feature values. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityView { /** - * Optional. Whether to use instance.reference to compute question answering correctness. + * Each piece of data holds the k requested values for one requested Feature. If no values for the requested Feature exist, the corresponding cell will be empty. This has the same size and is in the same order as the features from the header ReadFeatureValuesResponse.header. */ - useReference?: boolean | null; + data?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityViewData[]; /** - * Optional. Which version to use for evaluation. + * ID of the requested entity. */ - version?: number | null; + entityId?: string | null; } /** - * Input for question answering helpfulness metric. + * Container to hold value(s), successive in time, for one Feature from the request. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput { + export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityViewData { /** - * Required. Question answering helpfulness instance. + * Feature value if a single value is requested. */ - instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance; + value?: Schema$GoogleCloudAiplatformV1beta1FeatureValue; /** - * Required. Spec for question answering helpfulness score metric. + * Feature values list if values, successive in time, are requested. If the requested number of values is greater than the number of existing Feature values, nonexistent values are omitted instead of being returned as empty. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec; + values?: Schema$GoogleCloudAiplatformV1beta1FeatureValueList; } /** - * Spec for question answering helpfulness instance. + * Metadata for requested Features. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance { + export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseFeatureDescriptor { /** - * Optional. Text provided as context to answer the question. + * Feature ID. */ - context?: string | null; + id?: string | null; + } + /** + * Response header with metadata for the requested ReadFeatureValuesRequest.entity_type and Features. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseHeader { /** - * Required. The question asked and other instruction in the inference prompt. + * The resource name of the EntityType from the ReadFeatureValuesRequest. Value format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entityType\}`. */ - instruction?: string | null; + entityType?: string | null; /** - * Required. Output of the evaluated model. + * List of Feature metadata corresponding to each piece of ReadFeatureValuesResponse.EntityView.data. */ - prediction?: string | null; - /** - * Optional. Ground truth used to compare against the prediction. - */ - reference?: string | null; + featureDescriptors?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseFeatureDescriptor[]; } /** - * Spec for question answering helpfulness result. + * The request message for MatchService.ReadIndexDatapoints. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult { - /** - * Output only. Confidence for question answering helpfulness score. - */ - confidence?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1ReadIndexDatapointsRequest { /** - * Output only. Explanation for question answering helpfulness score. + * The ID of the DeployedIndex that will serve the request. */ - explanation?: string | null; + deployedIndexId?: string | null; /** - * Output only. Question Answering Helpfulness score. + * IDs of the datapoints to be searched for. 
*/ - score?: number | null; + ids?: string[] | null; } /** - * Spec for question answering helpfulness metric. + * The response message for MatchService.ReadIndexDatapoints. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec { + export interface Schema$GoogleCloudAiplatformV1beta1ReadIndexDatapointsResponse { /** - * Optional. Whether to use instance.reference to compute question answering helpfulness. + * The result list of datapoints. */ - useReference?: boolean | null; + datapoints?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint[]; + } + /** + * Response message for TensorboardService.ReadTensorboardBlobData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardBlobDataResponse { /** - * Optional. Which version to use for evaluation. + * Blob messages containing blob bytes. */ - version?: number | null; + blobs?: Schema$GoogleCloudAiplatformV1beta1TensorboardBlob[]; } /** - * Input for question answering quality metric. + * Response message for TensorboardService.ReadTensorboardSize. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput { + export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardSizeResponse { /** - * Required. Question answering quality instance. + * Payload storage size for the TensorBoard */ - instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance; + storageSizeByte?: string | null; + } + /** + * Response message for TensorboardService.ReadTensorboardTimeSeriesData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardTimeSeriesDataResponse { /** - * Required. Spec for question answering quality score metric. + * The returned time series data. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec; + timeSeriesData?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesData; } /** - * Spec for question answering quality instance. 
+ * Response message for TensorboardService.ReadTensorboardUsage. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance { + export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponse { /** - * Required. Text to answer the question. + * Maps year-month (YYYYMM) string to per month usage data. */ - context?: string | null; + monthlyUsageData?: { + [ + key: string + ]: Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerMonthUsageData; + } | null; + } + /** + * Per month usage data + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerMonthUsageData { /** - * Required. Question Answering prompt for LLM. + * Usage data for each user in the given month. */ - instruction?: string | null; + userUsageData?: Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerUserUsageData[]; + } + /** + * Per user usage data. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerUserUsageData { /** - * Required. Output of the evaluated model. + * User's username */ - prediction?: string | null; + username?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * Number of times the user has read data within the Tensorboard. */ - reference?: string | null; + viewCount?: string | null; } /** - * Spec for question answering quality result. + * ReasoningEngine provides a customizable runtime for models to determine which actions to take and in which order. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult { + export interface Schema$GoogleCloudAiplatformV1beta1ReasoningEngine { /** - * Output only. Confidence for question answering quality score. + * Output only. Timestamp when this ReasoningEngine was created. */ - confidence?: number | null; + createTime?: string | null; /** - * Output only. Explanation for question answering quality score. + * Optional. 
The description of the ReasoningEngine. */ - explanation?: string | null; + description?: string | null; /** - * Output only. Question Answering Quality score. + * Required. The display name of the ReasoningEngine. */ - score?: number | null; - } - /** - * Spec for question answering quality score metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec { + displayName?: string | null; /** - * Optional. Whether to use instance.reference to compute question answering quality. + * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - useReference?: boolean | null; + etag?: string | null; /** - * Optional. Which version to use for evaluation. + * Identifier. The resource name of the ReasoningEngine. */ - version?: number | null; + name?: string | null; + /** + * Required. Configurations of the ReasoningEngine + */ + spec?: Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpec; + /** + * Output only. Timestamp when this ReasoningEngine was most recently updated. + */ + updateTime?: string | null; } /** - * Input for question answering relevance metric. + * ReasoningEngine configurations */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput { + export interface Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpec { /** - * Required. Question answering relevance instance. + * Optional. Declarations for object class methods. */ - instance?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance; + classMethods?: Array<{[key: string]: any}> | null; /** - * Required. Spec for question answering relevance score metric. + * Required. User provided package spec of the ReasoningEngine. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec; + packageSpec?: Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec; } /** - * Spec for question answering relevance instance. 
+ * User provided package spec like pickled object and package requirements. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance { + export interface Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec { /** - * Optional. Text provided as context to answer the question. + * Optional. The Cloud Storage URI of the dependency files in tar.gz format. */ - context?: string | null; + dependencyFilesGcsUri?: string | null; /** - * Required. The question asked and other instruction in the inference prompt. + * Optional. The Cloud Storage URI of the pickled python object. */ - instruction?: string | null; + pickleObjectGcsUri?: string | null; /** - * Required. Output of the evaluated model. + * Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10. */ - prediction?: string | null; + pythonVersion?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * Optional. The Cloud Storage URI of the `requirements.txt` file */ - reference?: string | null; + requirementsGcsUri?: string | null; } /** - * Spec for question answering relevance result. + * Details of operations that perform reboot PersistentResource. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult { - /** - * Output only. Confidence for question answering relevance score. - */ - confidence?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1RebootPersistentResourceOperationMetadata { /** - * Output only. Explanation for question answering relevance score. + * Operation metadata for PersistentResource. */ - explanation?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Output only. Question Answering Relevance score. 
+ * Progress Message for Reboot LRO */ - score?: number | null; + progressMessage?: string | null; } /** - * Spec for question answering relevance metric. + * Request message for PersistentResourceService.RebootPersistentResource. */ - export interface Schema$GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec { - /** - * Optional. Whether to use instance.reference to compute question answering relevance. - */ - useReference?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1RebootPersistentResourceRequest {} + /** + * Request message for MetadataService.DeleteContextChildrenRequest. + */ + export interface Schema$GoogleCloudAiplatformV1beta1RemoveContextChildrenRequest { /** - * Optional. Which version to use for evaluation. + * The resource names of the child Contexts. */ - version?: number | null; + childContexts?: string[] | null; } /** - * Relevant contexts for one query. + * Response message for MetadataService.RemoveContextChildren. */ - export interface Schema$GoogleCloudAiplatformV1beta1RagContexts { + export interface Schema$GoogleCloudAiplatformV1beta1RemoveContextChildrenResponse {} + /** + * Request message for IndexService.RemoveDatapoints + */ + export interface Schema$GoogleCloudAiplatformV1beta1RemoveDatapointsRequest { /** - * All its contexts. + * A list of datapoint ids to be deleted. */ - contexts?: Schema$GoogleCloudAiplatformV1beta1RagContextsContext[]; + datapointIds?: string[] | null; } /** - * A context of the query. + * Response message for IndexService.RemoveDatapoints */ - export interface Schema$GoogleCloudAiplatformV1beta1RagContextsContext { + export interface Schema$GoogleCloudAiplatformV1beta1RemoveDatapointsResponse {} + /** + * Request message for NotebookInternalService.ReportExecutionEvent. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReportExecutionEventRequest { /** - * The distance between the query vector and the context text vector. + * Required. The type of the event. 
*/ - distance?: number | null; + eventType?: string | null; /** - * For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name. + * Optional. The error details of the event. */ - sourceUri?: string | null; + status?: Schema$GoogleRpcStatus; /** - * The text chunk. + * Required. The VM identity token (a JWT) for authenticating the VM. https://cloud.google.com/compute/docs/instances/verifying-instance-identity */ - text?: string | null; + vmToken?: string | null; } /** - * A RagCorpus is a RagFile container and a project can have multiple RagCorpora. + * Response message for NotebookInternalService.ReportExecutionEvent. */ - export interface Schema$GoogleCloudAiplatformV1beta1RagCorpus { + export interface Schema$GoogleCloudAiplatformV1beta1ReportExecutionEventResponse {} + /** + * Request message for NotebookInternalService.ReportRuntimeEvent. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ReportRuntimeEventRequest { /** - * Output only. Timestamp when this RagCorpus was created. + * Optional. The details of the request for debug. */ - createTime?: string | null; + eventDetails?: {[key: string]: string} | null; /** - * Optional. The description of the RagCorpus. + * Required. The type of the event. */ - description?: string | null; + eventType?: string | null; /** - * Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The details of the internal os service states. */ - displayName?: string | null; + internalOsServiceStateInstance?: Schema$GoogleCloudAiplatformV1beta1InternalOsServiceStateInstance[]; /** - * Output only. The resource name of the RagCorpus. + * Optional. The details of the internal os service states. 
*/ - name?: string | null; + internalOsServiceStateInstances?: Schema$GoogleCloudAiplatformV1beta1InternalOsServiceStateInstance[]; /** - * Output only. Timestamp when this RagCorpus was last updated. + * Required. The VM identity token (a JWT) for authenticating the VM. https://cloud.google.com/compute/docs/instances/verifying-instance-identity */ - updateTime?: string | null; + vmToken?: string | null; } /** - * A RagFile contains user data for chunking, embedding and indexing. + * Response message for NotebookInternalService.ReportRuntimeEvent. */ - export interface Schema$GoogleCloudAiplatformV1beta1RagFile { + export interface Schema$GoogleCloudAiplatformV1beta1ReportRuntimeEventResponse { /** - * Output only. Timestamp when this RagFile was created. + * If the idle shutdown is blocked by CP, CP will send the block message. Otherwise, this field is not set. */ - createTime?: string | null; - /** - * Optional. The description of the RagFile. - */ - description?: string | null; + idleShutdownMessage?: string | null; + } + /** + * Represents the spec of a group of resources of the same type, for example machine type, disk, and accelerators, in a PersistentResource. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ResourcePool { /** - * Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request. + * Optional. Optional spec to configure GKE autoscaling */ - directUploadSource?: Schema$GoogleCloudAiplatformV1beta1DirectUploadSource; + autoscalingSpec?: Schema$GoogleCloudAiplatformV1beta1ResourcePoolAutoscalingSpec; /** - * Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Optional. Disk spec for the machine in this node pool. */ - displayName?: string | null; + diskSpec?: Schema$GoogleCloudAiplatformV1beta1DiskSpec; /** - * Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now. 
+ * Immutable. The unique ID in a PersistentResource for referring to this resource pool. User can specify it if necessary. Otherwise, it's generated automatically. */ - gcsSource?: Schema$GoogleCloudAiplatformV1beta1GcsSource; + id?: string | null; /** - * Output only. Google Drive location. Supports importing individual files as well as Google Drive folders. + * Required. Immutable. The specification of a single machine. */ - googleDriveSource?: Schema$GoogleCloudAiplatformV1beta1GoogleDriveSource; + machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; /** - * Output only. The resource name of the RagFile. + * Optional. The total number of machines to use for this resource pool. */ - name?: string | null; + replicaCount?: string | null; /** - * Output only. The type of the RagFile. + * Output only. The number of machines currently in use by training jobs for this resource pool. Will replace idle_replica_count. */ - ragFileType?: string | null; + usedReplicaCount?: string | null; + } + /** + * The min/max number of replicas allowed if enabling autoscaling + */ + export interface Schema$GoogleCloudAiplatformV1beta1ResourcePoolAutoscalingSpec { /** - * Output only. The size of the RagFile in bytes. + * Optional. max replicas in the node pool, must be ≥ replica_count and \> min_replica_count or will throw error */ - sizeBytes?: string | null; + maxReplicaCount?: string | null; /** - * Output only. Timestamp when this RagFile was last updated. + * Optional. min replicas in the node pool, must be ≤ replica_count and < max_replica_count or will throw error */ - updateTime?: string | null; + minReplicaCount?: string | null; } /** - * Specifies the size and overlap of chunks for RagFiles. + * Persistent Cluster runtime information as output */ - export interface Schema$GoogleCloudAiplatformV1beta1RagFileChunkingConfig { + export interface Schema$GoogleCloudAiplatformV1beta1ResourceRuntime { /** - * The overlap between chunks. + * Output only. 
URIs for user to connect to the Cluster. Example: { "RAY_HEAD_NODE_INTERNAL_IP": "head-node-IP:10001" "RAY_DASHBOARD_URI": "ray-dashboard-address:8888" \} */ - chunkOverlap?: number | null; + accessUris?: {[key: string]: string} | null; /** - * The size of the chunks. + * Output only. The resource name of NotebookRuntimeTemplate for the RoV Persistent Cluster The NotebokRuntimeTemplate is created in the same VPC (if set), and with the same Ray and Python version as the Persistent Cluster. Example: "projects/1000/locations/us-central1/notebookRuntimeTemplates/abc123" */ - chunkSize?: number | null; + notebookRuntimeTemplate?: string | null; } /** - * A query to retrieve relevant contexts. + * Configuration for the runtime on a PersistentResource instance, including but not limited to: * Service accounts used to run the workloads. * Whether to make it a dedicated Ray Cluster. */ - export interface Schema$GoogleCloudAiplatformV1beta1RagQuery { + export interface Schema$GoogleCloudAiplatformV1beta1ResourceRuntimeSpec { /** - * Optional. The number of contexts to retrieve. + * Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. */ - similarityTopK?: number | null; + raySpec?: Schema$GoogleCloudAiplatformV1beta1RaySpec; /** - * Optional. The query in text format to get relevant contexts. + * Optional. Configure the use of workload identity on the PersistentResource */ - text?: string | null; + serviceAccountSpec?: Schema$GoogleCloudAiplatformV1beta1ServiceAccountSpec; } /** - * Request message for PredictionService.RawPredict. + * Statistics information about resource consumption. */ - export interface Schema$GoogleCloudAiplatformV1beta1RawPredictRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ResourcesConsumed { /** - * The prediction input. Supports HTTP headers and arbitrary data payload. A DeployedModel may have an upper limit on the number of instances it supports per request. 
When this limit it is exceeded for an AutoML model, the RawPredict method returns an error. When this limit is exceeded for a custom-trained model, the behavior varies depending on the model. You can specify the schema for each instance in the predict_schemata.instance_schema_uri field when you create a Model. This schema applies when you deploy the `Model` as a `DeployedModel` to an Endpoint and use the `RawPredict` method. + * Output only. The number of replica hours used. Note that many replicas may run in parallel, and additionally any given work may be queued for some time. Therefore this value is not strictly related to wall time. */ - httpBody?: Schema$GoogleApiHttpBody; + replicaHours?: number | null; } /** - * Configuration for the Ray metrics. + * Runtime operation information for DatasetService.RestoreDatasetVersion. */ - export interface Schema$GoogleCloudAiplatformV1beta1RayMetricSpec { + export interface Schema$GoogleCloudAiplatformV1beta1RestoreDatasetVersionOperationMetadata { /** - * Optional. Flag to disable the Ray metrics collection. + * The common part of the operation metadata. */ - disabled?: boolean | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Configuration information for the Ray cluster. For experimental launch, Ray cluster creation and Persistent cluster creation are 1:1 mapping: We will provision all the nodes within the Persistent cluster as Ray nodes. + * Request message for JobService.ResumeModelDeploymentMonitoringJob. */ - export interface Schema$GoogleCloudAiplatformV1beta1RaySpec { + export interface Schema$GoogleCloudAiplatformV1beta1ResumeModelDeploymentMonitoringJobRequest {} + /** + * Request message for ScheduleService.ResumeSchedule. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ResumeScheduleRequest { /** - * Optional. This will be used to indicate which resource pool will serve as the Ray head node(the first node within that pool). 
Will use the machine from the first workerpool as the head node by default if this field isn't set. + * Optional. Whether to backfill missed runs when the schedule is resumed from PAUSED state. If set to true, all missed runs will be scheduled. New runs will be scheduled after the backfill is complete. This will also update Schedule.catch_up field. Default to false. */ - headNodeResourcePoolId?: string | null; + catchUp?: boolean | null; + } + /** + * Defines a retrieval tool that model can call to access external knowledge. + */ + export interface Schema$GoogleCloudAiplatformV1beta1Retrieval { /** - * Optional. Default image for user to choose a preferred ML framework (for example, TensorFlow or Pytorch) by choosing from [Vertex prebuilt images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). Either this or the resource_pool_images is required. Use this field if you need all the resource pools to have the same Ray image. Otherwise, use the {@code resource_pool_images\} field. + * Optional. Disable using the result from this tool in detecting grounding attribution. This does not affect how the result is given to the model for generation. */ - imageUri?: string | null; + disableAttribution?: boolean | null; /** - * Optional. Ray metrics configurations. + * Set to use data source powered by Vertex AI Search. */ - rayMetricSpec?: Schema$GoogleCloudAiplatformV1beta1RayMetricSpec; + vertexAiSearch?: Schema$GoogleCloudAiplatformV1beta1VertexAISearch; /** - * Optional. Required if image_uri isn't set. A map of resource_pool_id to prebuild Ray image if user need to use different images for different head/worker pools. This map needs to cover all the resource pool ids. Example: { "ray_head_node_pool": "head image" "ray_worker_node_pool1": "worker image" "ray_worker_node_pool2": "another worker image" \} + * Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. 
*/ - resourcePoolImages?: {[key: string]: string} | null; + vertexRagStore?: Schema$GoogleCloudAiplatformV1beta1VertexRagStore; } /** - * Request message for FeaturestoreOnlineServingService.ReadFeatureValues. + * Request message for VertexRagService.RetrieveContexts. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequest { /** - * Required. ID for a specific entity. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. + * Required. Single RAG retrieve query. */ - entityId?: string | null; + query?: Schema$GoogleCloudAiplatformV1beta1RagQuery; /** - * Required. Selector choosing Features of the target EntityType. + * The data source for Vertex RagStore. */ - featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; + vertexRagStore?: Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore; } /** - * Response message for FeaturestoreOnlineServingService.ReadFeatureValues. + * The data source for Vertex RagStore. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponse { - /** - * Entity view with Feature values. This may be the entity in the Featurestore if values for all Features were requested, or a projection of the entity in the Featurestore if values for only some Features were requested. - */ - entityView?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityView; + export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore { /** - * Response header. + * Optional. Deprecated. Please use rag_resources to specify the data source. */ - header?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseHeader; - } - /** - * Entity view with Feature values. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityView { + ragCorpora?: string[] | null; /** - * Each piece of data holds the k requested values for one requested Feature. If no values for the requested Feature exist, the corresponding cell will be empty. This has the same size and is in the same order as the features from the header ReadFeatureValuesResponse.header. + * Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. */ - data?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityViewData[]; + ragResources?: Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource[]; /** - * ID of the requested entity. + * Optional. Only return contexts with vector distance smaller than the threshold. */ - entityId?: string | null; + vectorDistanceThreshold?: number | null; } /** - * Container to hold value(s), successive in time, for one Feature from the request. + * The definition of the Rag resource. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseEntityViewData { + export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource { /** - * Feature value if a single value is requested. + * Optional. RagCorpora resource name. Format: `projects/{project\}/locations/{location\}/ragCorpora/{rag_corpus\}` */ - value?: Schema$GoogleCloudAiplatformV1beta1FeatureValue; + ragCorpus?: string | null; /** - * Feature values list if values, successive in time, are requested. If the requested number of values is greater than the number of existing Feature values, nonexistent values are omitted instead of being returned as empty. + * Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. 
*/ - values?: Schema$GoogleCloudAiplatformV1beta1FeatureValueList; + ragFileIds?: string[] | null; } /** - * Metadata for requested Features. + * Response message for VertexRagService.RetrieveContexts. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseFeatureDescriptor { + export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsResponse { /** - * Feature ID. + * The contexts of the query. */ - id?: string | null; + contexts?: Schema$GoogleCloudAiplatformV1beta1RagContexts; } /** - * Response header with metadata for the requested ReadFeatureValuesRequest.entity_type and Features. + * Input for rouge metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseHeader { + export interface Schema$GoogleCloudAiplatformV1beta1RougeInput { /** - * The resource name of the EntityType from the ReadFeatureValuesRequest. Value format: `projects/{project\}/locations/{location\}/featurestores/{featurestore\}/entityTypes/{entityType\}`. + * Required. Repeated rouge instances. */ - entityType?: string | null; + instances?: Schema$GoogleCloudAiplatformV1beta1RougeInstance[]; /** - * List of Feature metadata corresponding to each piece of ReadFeatureValuesResponse.EntityView.data. + * Required. Spec for rouge score metric. */ - featureDescriptors?: Schema$GoogleCloudAiplatformV1beta1ReadFeatureValuesResponseFeatureDescriptor[]; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1RougeSpec; } /** - * The request message for MatchService.ReadIndexDatapoints. + * Spec for rouge instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadIndexDatapointsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1RougeInstance { /** - * The ID of the DeployedIndex that will serve the request. + * Required. Output of the evaluated model. */ - deployedIndexId?: string | null; + prediction?: string | null; /** - * IDs of the datapoints to be searched for. + * Required. 
Ground truth used to compare against the prediction. */ - ids?: string[] | null; + reference?: string | null; } /** - * The response message for MatchService.ReadIndexDatapoints. + * Rouge metric value for an instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadIndexDatapointsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1RougeMetricValue { /** - * The result list of datapoints. + * Output only. Rouge score. */ - datapoints?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint[]; + score?: number | null; } /** - * Response message for TensorboardService.ReadTensorboardBlobData. + * Results for rouge metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardBlobDataResponse { + export interface Schema$GoogleCloudAiplatformV1beta1RougeResults { /** - * Blob messages containing blob bytes. + * Output only. Rouge metric values. */ - blobs?: Schema$GoogleCloudAiplatformV1beta1TensorboardBlob[]; + rougeMetricValues?: Schema$GoogleCloudAiplatformV1beta1RougeMetricValue[]; } /** - * Response message for TensorboardService.ReadTensorboardSize. + * Spec for rouge score metric - calculates the recall of n-grams in prediction as compared to reference - returns a score ranging between 0 and 1. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardSizeResponse { + export interface Schema$GoogleCloudAiplatformV1beta1RougeSpec { /** - * Payload storage size for the TensorBoard + * Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum. */ - storageSizeByte?: string | null; - } - /** - * Response message for TensorboardService.ReadTensorboardTimeSeriesData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardTimeSeriesDataResponse { + rougeType?: string | null; /** - * The returned time series data. + * Optional. Whether to split summaries while using rougeLsum. 
*/ - timeSeriesData?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesData; - } - /** - * Response message for TensorboardService.ReadTensorboardUsage. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponse { + splitSummaries?: boolean | null; /** - * Maps year-month (YYYYMM) string to per month usage data. + * Optional. Whether to use stemmer to compute rouge score. */ - monthlyUsageData?: { - [ - key: string - ]: Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerMonthUsageData; - } | null; + useStemmer?: boolean | null; } /** - * Per month usage data + * Runtime configuration to run the extension. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerMonthUsageData { + export interface Schema$GoogleCloudAiplatformV1beta1RuntimeConfig { /** - * Usage data for each user in the given month. + * Code execution runtime configurations for code interpreter extension. */ - userUsageData?: Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerUserUsageData[]; - } - /** - * Per user usage data. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ReadTensorboardUsageResponsePerUserUsageData { + codeInterpreterRuntimeConfig?: Schema$GoogleCloudAiplatformV1beta1RuntimeConfigCodeInterpreterRuntimeConfig; /** - * User's username + * Optional. Default parameters that will be set for all the execution of this extension. If specified, the parameter values can be overridden by values in [[ExecuteExtensionRequest.operation_params]] at request time. The struct should be in a form of map with param name as the key and actual param value as the value. E.g. If this operation requires a param "name" to be set to "abc". you can set this to something like {"name": "abc"\}. */ - username?: string | null; + defaultParams?: {[key: string]: any} | null; /** - * Number of times the user has read data within the Tensorboard. + * Runtime configuration for Vertext AI Search extension. 
*/ - viewCount?: string | null; + vertexAiSearchRuntimeConfig?: Schema$GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig; } - /** - * ReasoningEngine provides a customizable runtime for models to determine which actions to take and in which order. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ReasoningEngine { - /** - * Output only. Timestamp when this ReasoningEngine was created. - */ - createTime?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1RuntimeConfigCodeInterpreterRuntimeConfig { /** - * Optional. The description of the ReasoningEngine. + * Optional. The Cloud Storage bucket for file input of this Extension. If specified, support input from the Cloud Storage bucket. Vertex Extension Custom Code Service Agent should be granted file reader to this bucket. If not specified, the extension will only accept file contents from request body and reject Cloud Storage file inputs. */ - description?: string | null; + fileInputGcsBucket?: string | null; /** - * Required. The display name of the ReasoningEngine. + * Optional. The Cloud Storage bucket for file output of this Extension. If specified, write all output files to the Cloud Storage bucket. Vertex Extension Custom Code Service Agent should be granted file writer to this bucket. If not specified, the file content will be output in response body. */ - displayName?: string | null; + fileOutputGcsBucket?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig { /** - * Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. 
*/ - etag?: string | null; + appId?: string | null; /** - * Identifier. The resource name of the ReasoningEngine. + * [Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project\}/locations/{location\}/collections/{collection\}/engines/{engine\}/servingConfigs/{serving_config\}` */ - name?: string | null; + servingConfigName?: string | null; + } + /** + * Input for safety metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SafetyInput { /** - * Required. Configurations of the ReasoningEngine + * Required. Safety instance. */ - spec?: Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpec; + instance?: Schema$GoogleCloudAiplatformV1beta1SafetyInstance; /** - * Output only. Timestamp when this ReasoningEngine was most recently updated. + * Required. Spec for safety metric. */ - updateTime?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1SafetySpec; } /** - * ReasoningEngine configurations + * Spec for safety instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpec { - /** - * Optional. Declarations for object class methods. - */ - classMethods?: Array<{[key: string]: any}> | null; + export interface Schema$GoogleCloudAiplatformV1beta1SafetyInstance { /** - * Required. User provided package spec of the ReasoningEngine. + * Required. Output of the evaluated model. */ - packageSpec?: Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec; + prediction?: string | null; } /** - * User provided package spec like pickled object and package requirements. + * Safety rating corresponding to the generated content. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec { + export interface Schema$GoogleCloudAiplatformV1beta1SafetyRating { /** - * Optional. The Cloud Storage URI of the dependency files in tar.gz format. + * Output only. Indicates whether the content was filtered out because of this rating. 
*/ - dependencyFilesGcsUri?: string | null; + blocked?: boolean | null; /** - * Optional. The Cloud Storage URI of the pickled python object. + * Output only. Harm category. */ - pickleObjectGcsUri?: string | null; + category?: string | null; /** - * Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10. + * Output only. Harm probability levels in the content. */ - pythonVersion?: string | null; + probability?: string | null; /** - * Optional. The Cloud Storage URI of the `requirements.txt` file + * Output only. Harm probability score. */ - requirementsGcsUri?: string | null; - } - /** - * Details of operations that perform reboot PersistentResource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RebootPersistentResourceOperationMetadata { + probabilityScore?: number | null; /** - * Operation metadata for PersistentResource. + * Output only. Harm severity levels in the content. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + severity?: string | null; /** - * Progress Message for Reboot LRO + * Output only. Harm severity score. */ - progressMessage?: string | null; + severityScore?: number | null; } /** - * Request message for PersistentResourceService.RebootPersistentResource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RebootPersistentResourceRequest {} - /** - * Request message for MetadataService.DeleteContextChildrenRequest. + * Spec for safety result. */ - export interface Schema$GoogleCloudAiplatformV1beta1RemoveContextChildrenRequest { + export interface Schema$GoogleCloudAiplatformV1beta1SafetyResult { /** - * The resource names of the child Contexts. + * Output only. Confidence for safety score. */ - childContexts?: string[] | null; - } - /** - * Response message for MetadataService.RemoveContextChildren. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1RemoveContextChildrenResponse {} - /** - * Request message for IndexService.RemoveDatapoints - */ - export interface Schema$GoogleCloudAiplatformV1beta1RemoveDatapointsRequest { + confidence?: number | null; /** - * A list of datapoint ids to be deleted. + * Output only. Explanation for safety score. */ - datapointIds?: string[] | null; + explanation?: string | null; + /** + * Output only. Safety score. + */ + score?: number | null; } /** - * Response message for IndexService.RemoveDatapoints - */ - export interface Schema$GoogleCloudAiplatformV1beta1RemoveDatapointsResponse {} - /** - * Request message for NotebookInternalService.ReportExecutionEvent. + * Safety settings. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReportExecutionEventRequest { + export interface Schema$GoogleCloudAiplatformV1beta1SafetySetting { /** - * Required. The type of the event. + * Required. Harm category. */ - eventType?: string | null; + category?: string | null; /** - * Optional. The error details of the event. + * Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. */ - status?: Schema$GoogleRpcStatus; + method?: string | null; /** - * Required. The VM identity token (a JWT) for authenticating the VM. https://cloud.google.com/compute/docs/instances/verifying-instance-identity + * Required. The harm block threshold. */ - vmToken?: string | null; + threshold?: string | null; } /** - * Response message for NotebookInternalService.ReportExecutionEvent. + * Spec for safety metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReportExecutionEventResponse {} + export interface Schema$GoogleCloudAiplatformV1beta1SafetySpec { + /** + * Optional. Which version to use for evaluation. + */ + version?: number | null; + } /** - * Request message for NotebookInternalService.ReportRuntimeEvent. 
+ * Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReportRuntimeEventRequest { + export interface Schema$GoogleCloudAiplatformV1beta1SampleConfig { /** - * Optional. The details of the request for debug. + * The percentage of data needed to be labeled in each following batch (except the first batch). */ - eventDetails?: {[key: string]: string} | null; + followingBatchSamplePercentage?: number | null; /** - * Required. The type of the event. + * The percentage of data needed to be labeled in the first batch. */ - eventType?: string | null; + initialBatchSamplePercentage?: number | null; /** - * The details of the internal os service states. + * Field to choose sampling strategy. Sampling strategy will decide which data should be selected for human labeling in every batch. */ - internalOsServiceStateInstance?: Schema$GoogleCloudAiplatformV1beta1InternalOsServiceStateInstance[]; + sampleStrategy?: string | null; + } + /** + * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SampledShapleyAttribution { /** - * Optional. The details of the internal os service states. + * Required. The number of feature permutations to consider when approximating the Shapley values. Valid range of its value is [1, 50], inclusively. */ - internalOsServiceStateInstances?: Schema$GoogleCloudAiplatformV1beta1InternalOsServiceStateInstance[]; + pathCount?: number | null; + } + /** + * Sampling Strategy for logging, can be for both training and prediction dataset. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SamplingStrategy { /** - * Required. The VM identity token (a JWT) for authenticating the VM. 
https://cloud.google.com/compute/docs/instances/verifying-instance-identity + * Random sample config. Will support more sampling strategies later. */ - vmToken?: string | null; + randomSampleConfig?: Schema$GoogleCloudAiplatformV1beta1SamplingStrategyRandomSampleConfig; } /** - * Response message for NotebookInternalService.ReportRuntimeEvent. + * Requests are randomly selected. */ - export interface Schema$GoogleCloudAiplatformV1beta1ReportRuntimeEventResponse { + export interface Schema$GoogleCloudAiplatformV1beta1SamplingStrategyRandomSampleConfig { /** - * If the idle shutdown is blocked by CP, CP will send the block message. Otherwise, this field is not set. + * Sample rate (0, 1] */ - idleShutdownMessage?: string | null; + sampleRate?: number | null; } /** - * Represents the spec of a group of resources of the same type, for example machine type, disk, and accelerators, in a PersistentResource. + * A SavedQuery is a view of the dataset. It references a subset of annotations by problem type and filters. */ - export interface Schema$GoogleCloudAiplatformV1beta1ResourcePool { + export interface Schema$GoogleCloudAiplatformV1beta1SavedQuery { /** - * Optional. Optional spec to configure GKE autoscaling + * Output only. Filters on the Annotations in the dataset. */ - autoscalingSpec?: Schema$GoogleCloudAiplatformV1beta1ResourcePoolAutoscalingSpec; + annotationFilter?: string | null; /** - * Optional. Disk spec for the machine in this node pool. + * Output only. Number of AnnotationSpecs in the context of the SavedQuery. */ - diskSpec?: Schema$GoogleCloudAiplatformV1beta1DiskSpec; + annotationSpecCount?: number | null; /** - * Immutable. The unique ID in a PersistentResource for referring to this resource pool. User can specify it if necessary. Otherwise, it's generated automatically. + * Output only. Timestamp when this SavedQuery was created. */ - id?: string | null; + createTime?: string | null; /** - * Required. Immutable. The specification of a single machine. 
+ * Required. The user-defined name of the SavedQuery. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + displayName?: string | null; /** - * Optional. The total number of machines to use for this resource pool. + * Used to perform a consistent read-modify-write update. If not set, a blind "overwrite" update happens. */ - replicaCount?: string | null; + etag?: string | null; /** - * Output only. The number of machines currently in use by training jobs for this resource pool. Will replace idle_replica_count. + * Some additional information about the SavedQuery. */ - usedReplicaCount?: string | null; - } - /** - * The min/max number of replicas allowed if enabling autoscaling - */ - export interface Schema$GoogleCloudAiplatformV1beta1ResourcePoolAutoscalingSpec { + metadata?: any | null; /** - * Optional. max replicas in the node pool, must be ≥ replica_count and \> min_replica_count or will throw error + * Output only. Resource name of the SavedQuery. */ - maxReplicaCount?: string | null; + name?: string | null; /** - * Optional. min replicas in the node pool, must be ≤ replica_count and < max_replica_count or will throw error + * Required. Problem type of the SavedQuery. Allowed values: * IMAGE_CLASSIFICATION_SINGLE_LABEL * IMAGE_CLASSIFICATION_MULTI_LABEL * IMAGE_BOUNDING_POLY * IMAGE_BOUNDING_BOX * TEXT_CLASSIFICATION_SINGLE_LABEL * TEXT_CLASSIFICATION_MULTI_LABEL * TEXT_EXTRACTION * TEXT_SENTIMENT * VIDEO_CLASSIFICATION * VIDEO_OBJECT_TRACKING */ - minReplicaCount?: string | null; - } - /** - * Persistent Cluster runtime information as output - */ - export interface Schema$GoogleCloudAiplatformV1beta1ResourceRuntime { + problemType?: string | null; /** - * Output only. URIs for user to connect to the Cluster. Example: { "RAY_HEAD_NODE_INTERNAL_IP": "head-node-IP:10001" "RAY_DASHBOARD_URI": "ray-dashboard-address:8888" \} + * Output only. 
If the Annotations belonging to the SavedQuery can be used for AutoML training. */ - accessUris?: {[key: string]: string} | null; + supportAutomlTraining?: boolean | null; /** - * Output only. The resource name of NotebookRuntimeTemplate for the RoV Persistent Cluster The NotebokRuntimeTemplate is created in the same VPC (if set), and with the same Ray and Python version as the Persistent Cluster. Example: "projects/1000/locations/us-central1/notebookRuntimeTemplates/abc123" + * Output only. Timestamp when SavedQuery was last updated. */ - notebookRuntimeTemplate?: string | null; + updateTime?: string | null; } /** - * Configuration for the runtime on a PersistentResource instance, including but not limited to: * Service accounts used to run the workloads. * Whether to make it a dedicated Ray Cluster. + * One point viewable on a scalar metric plot. */ - export interface Schema$GoogleCloudAiplatformV1beta1ResourceRuntimeSpec { - /** - * Optional. Ray cluster configuration. Required when creating a dedicated RayCluster on the PersistentResource. - */ - raySpec?: Schema$GoogleCloudAiplatformV1beta1RaySpec; + export interface Schema$GoogleCloudAiplatformV1beta1Scalar { /** - * Optional. Configure the use of workload identity on the PersistentResource + * Value of the point at this step / timestamp. */ - serviceAccountSpec?: Schema$GoogleCloudAiplatformV1beta1ServiceAccountSpec; + value?: number | null; } /** - * Statistics information about resource consumption. + * An instance of a Schedule periodically schedules runs to make API calls based on user specified time specification and API request type. */ - export interface Schema$GoogleCloudAiplatformV1beta1ResourcesConsumed { + export interface Schema$GoogleCloudAiplatformV1beta1Schedule { /** - * Output only. The number of replica hours used. Note that many replicas may run in parallel, and additionally any given work may be queued for some time. Therefore this value is not strictly related to wall time. + * Optional. 
Whether new scheduled runs can be queued when max_concurrent_runs limit is reached. If set to true, new runs will be queued instead of skipped. Default to false. */ - replicaHours?: number | null; - } - /** - * Runtime operation information for DatasetService.RestoreDatasetVersion. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RestoreDatasetVersionOperationMetadata { + allowQueueing?: boolean | null; /** - * The common part of the operation metadata. + * Output only. Whether to backfill missed runs when the schedule is resumed from PAUSED state. If set to true, all missed runs will be scheduled. New runs will be scheduled after the backfill is complete. Default to false. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for JobService.ResumeModelDeploymentMonitoringJob. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ResumeModelDeploymentMonitoringJobRequest {} - /** - * Request message for ScheduleService.ResumeSchedule. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ResumeScheduleRequest { + catchUp?: boolean | null; /** - * Optional. Whether to backfill missed runs when the schedule is resumed from PAUSED state. If set to true, all missed runs will be scheduled. New runs will be scheduled after the backfill is complete. This will also update Schedule.catch_up field. Default to false. + * Request for ModelMonitoringService.CreateModelMonitoringJob. */ - catchUp?: boolean | null; - } - /** - * Defines a retrieval tool that model can call to access external knowledge. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Retrieval { + createModelMonitoringJobRequest?: Schema$GoogleCloudAiplatformV1beta1CreateModelMonitoringJobRequest; /** - * Optional. Disable using the result from this tool in detecting grounding attribution. This does not affect how the result is given to the model for generation. + * Request for NotebookService.CreateNotebookExecutionJob. 
*/ - disableAttribution?: boolean | null; + createNotebookExecutionJobRequest?: Schema$GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest; /** - * Set to use data source powered by Vertex AI Search. + * Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project\}/locations/{location\}). */ - vertexAiSearch?: Schema$GoogleCloudAiplatformV1beta1VertexAISearch; + createPipelineJobRequest?: Schema$GoogleCloudAiplatformV1beta1CreatePipelineJobRequest; /** - * Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. + * Output only. Timestamp when this Schedule was created. */ - vertexRagStore?: Schema$GoogleCloudAiplatformV1beta1VertexRagStore; - } - /** - * Request message for VertexRagService.RetrieveContexts. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequest { + createTime?: string | null; /** - * Required. Single RAG retrieve query. + * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE\}" or "TZ=${IANA_TIME_ZONE\}". The ${IANA_TIME_ZONE\} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". */ - query?: Schema$GoogleCloudAiplatformV1beta1RagQuery; + cron?: string | null; /** - * The data source for Vertex RagStore. + * Required. User provided name of the Schedule. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - vertexRagStore?: Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore; - } - /** - * The data source for Vertex RagStore. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore { + displayName?: string | null; /** - * Optional. Deprecated. 
Please use rag_resources to specify the data source. + * Optional. Timestamp after which no new runs can be scheduled. If specified, The schedule will be completed when either end_time is reached or when scheduled_run_count \>= max_run_count. If not specified, new runs will keep getting scheduled until this Schedule is paused or deleted. Already scheduled runs will be allowed to complete. Unset if not specified. */ - ragCorpora?: string[] | null; + endTime?: string | null; /** - * Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. + * Output only. Timestamp when this Schedule was last paused. Unset if never paused. */ - ragResources?: Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource[]; + lastPauseTime?: string | null; /** - * Optional. Only return contexts with vector distance smaller than the threshold. + * Output only. Timestamp when this Schedule was last resumed. Unset if never resumed from pause. */ - vectorDistanceThreshold?: number | null; - } - /** - * The definition of the Rag resource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource { + lastResumeTime?: string | null; /** - * Optional. RagCorpora resource name. Format: `projects/{project\}/locations/{location\}/ragCorpora/{rag_corpus\}` + * Output only. Response of the last scheduled run. This is the response for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). Unset if no run has been scheduled yet. */ - ragCorpus?: string | null; + lastScheduledRunResponse?: Schema$GoogleCloudAiplatformV1beta1ScheduleRunResponse; /** - * Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + * Required. 
Maximum number of runs that can be started concurrently for this Schedule. This is the limit for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). */ - ragFileIds?: string[] | null; - } - /** - * Response message for VertexRagService.RetrieveContexts. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RetrieveContextsResponse { + maxConcurrentRunCount?: string | null; /** - * The contexts of the query. + * Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count \>= max_run_count or when end_time is reached. If not specified, new runs will keep getting scheduled until this Schedule is paused or deleted. Already scheduled runs will be allowed to complete. Unset if not specified. */ - contexts?: Schema$GoogleCloudAiplatformV1beta1RagContexts; - } - /** - * Input for rouge metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RougeInput { + maxRunCount?: string | null; /** - * Required. Repeated rouge instances. + * Immutable. The resource name of the Schedule. */ - instances?: Schema$GoogleCloudAiplatformV1beta1RougeInstance[]; + name?: string | null; /** - * Required. Spec for rouge score metric. + * Output only. Timestamp when this Schedule should schedule the next run. Having a next_run_time in the past means the runs are being started behind schedule. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1RougeSpec; - } - /** - * Spec for rouge instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RougeInstance { + nextRunTime?: string | null; /** - * Required. Output of the evaluated model. + * Output only. The number of runs started by this schedule. */ - prediction?: string | null; + startedRunCount?: string | null; /** - * Required. Ground truth used to compare against the prediction. + * Optional. Timestamp after which the first run can be scheduled. 
Default to Schedule create time if not specified. */ - reference?: string | null; - } - /** - * Rouge metric value for an instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1RougeMetricValue { + startTime?: string | null; /** - * Output only. Rouge score. + * Output only. The state of this Schedule. */ - score?: number | null; + state?: string | null; + /** + * Output only. Timestamp when this Schedule was updated. + */ + updateTime?: string | null; } /** - * Results for rouge metric. + * Status of a scheduled run. */ - export interface Schema$GoogleCloudAiplatformV1beta1RougeResults { + export interface Schema$GoogleCloudAiplatformV1beta1ScheduleRunResponse { /** - * Output only. Rouge metric values. + * The response of the scheduled run. */ - rougeMetricValues?: Schema$GoogleCloudAiplatformV1beta1RougeMetricValue[]; + runResponse?: string | null; + /** + * The scheduled run time based on the user-specified schedule. + */ + scheduledRunTime?: string | null; } /** - * Spec for rouge score metric - calculates the recall of n-grams in prediction as compared to reference - returns a score ranging between 0 and 1. + * All parameters related to queuing and scheduling of custom jobs. */ - export interface Schema$GoogleCloudAiplatformV1beta1RougeSpec { + export interface Schema$GoogleCloudAiplatformV1beta1Scheduling { /** - * Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum. + * Optional. Indicates if the job should retry for internal errors after the job starts running. If true, overrides `Scheduling.restart_job_on_worker_restart` to false. */ - rougeType?: string | null; + disableRetries?: boolean | null; /** - * Optional. Whether to split summaries while using rougeLsum. + * Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. 
*/ - splitSummaries?: boolean | null; + restartJobOnWorkerRestart?: boolean | null; /** - * Optional. Whether to use stemmer to compute rouge score. + * The maximum job running time. The default is 7 days. */ - useStemmer?: boolean | null; + timeout?: string | null; } /** - * Runtime configuration to run the extension. + * Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. */ - export interface Schema$GoogleCloudAiplatformV1beta1RuntimeConfig { + export interface Schema$GoogleCloudAiplatformV1beta1Schema { /** - * Code execution runtime configurations for code interpreter extension. + * Optional. Default value of the data. */ - codeInterpreterRuntimeConfig?: Schema$GoogleCloudAiplatformV1beta1RuntimeConfigCodeInterpreterRuntimeConfig; + default?: any | null; /** - * Optional. Default parameters that will be set for all the execution of this extension. If specified, the parameter values can be overridden by values in [[ExecuteExtensionRequest.operation_params]] at request time. The struct should be in a form of map with param name as the key and actual param value as the value. E.g. If this operation requires a param "name" to be set to "abc". you can set this to something like {"name": "abc"\}. + * Optional. The description of the data. */ - defaultParams?: {[key: string]: any} | null; + description?: string | null; /** - * Runtime configuration for Vertext AI Search extension. + * Optional. Possible values of the element of Type.STRING with enum format. 
For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]\} */ - vertexAiSearchRuntimeConfig?: Schema$GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig; - } - export interface Schema$GoogleCloudAiplatformV1beta1RuntimeConfigCodeInterpreterRuntimeConfig { + enum?: string[] | null; /** - * Optional. The Cloud Storage bucket for file input of this Extension. If specified, support input from the Cloud Storage bucket. Vertex Extension Custom Code Service Agent should be granted file reader to this bucket. If not specified, the extension will only accept file contents from request body and reject Cloud Storage file inputs. + * Optional. Example of the object. Will only populated when the object is the root. */ - fileInputGcsBucket?: string | null; + example?: any | null; /** - * Optional. The Cloud Storage bucket for file output of this Extension. If specified, write all output files to the Cloud Storage bucket. Vertex Extension Custom Code Service Agent should be granted file writer to this bucket. If not specified, the file content will be output in response body. + * Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc */ - fileOutputGcsBucket?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig { + format?: string | null; /** - * Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. + * Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. */ - appId?: string | null; + items?: Schema$GoogleCloudAiplatformV1beta1Schema; /** - * [Deprecated] Please use app_id instead. 
Vertex AI Search serving config name. Format: `projects/{project\}/locations/{location\}/collections/{collection\}/engines/{engine\}/servingConfigs/{serving_config\}` + * Optional. Maximum value of the Type.INTEGER and Type.NUMBER */ - servingConfigName?: string | null; - } - /** - * Input for safety metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SafetyInput { + maximum?: number | null; /** - * Required. Safety instance. + * Optional. Maximum number of the elements for Type.ARRAY. */ - instance?: Schema$GoogleCloudAiplatformV1beta1SafetyInstance; + maxItems?: string | null; /** - * Required. Spec for safety metric. + * Optional. Maximum length of the Type.STRING */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1SafetySpec; - } - /** - * Spec for safety instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SafetyInstance { + maxLength?: string | null; /** - * Required. Output of the evaluated model. + * Optional. Maximum number of the properties for Type.OBJECT. */ - prediction?: string | null; - } - /** - * Safety rating corresponding to the generated content. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SafetyRating { + maxProperties?: string | null; /** - * Output only. Indicates whether the content was filtered out because of this rating. + * Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER */ - blocked?: boolean | null; + minimum?: number | null; /** - * Output only. Harm category. + * Optional. Minimum number of the elements for Type.ARRAY. */ - category?: string | null; + minItems?: string | null; /** - * Output only. Harm probability levels in the content. + * Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING */ - probability?: string | null; + minLength?: string | null; /** - * Output only. Harm probability score. + * Optional. Minimum number of the properties for Type.OBJECT. 
*/ - probabilityScore?: number | null; + minProperties?: string | null; /** - * Output only. Harm severity levels in the content. + * Optional. Indicates if the value may be null. */ - severity?: string | null; + nullable?: boolean | null; /** - * Output only. Harm severity score. + * Optional. Pattern of the Type.STRING to restrict a string to a regular expression. */ - severityScore?: number | null; - } - /** - * Spec for safety result. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SafetyResult { + pattern?: string | null; /** - * Output only. Confidence for safety score. + * Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. */ - confidence?: number | null; + properties?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1Schema; + } | null; /** - * Output only. Explanation for safety score. + * Optional. Required properties of Type.OBJECT. */ - explanation?: string | null; + required?: string[] | null; /** - * Output only. Safety score. + * Optional. The title of the Schema. */ - score?: number | null; + title?: string | null; + /** + * Optional. The type of the data. + */ + type?: string | null; } /** - * Safety settings. + * An entry of mapping between color and AnnotationSpec. The mapping is used in segmentation mask. */ - export interface Schema$GoogleCloudAiplatformV1beta1SafetySetting { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaAnnotationSpecColor { /** - * Required. Harm category. + * The color of the AnnotationSpec in a segmentation mask. */ - category?: string | null; + color?: Schema$GoogleTypeColor; /** - * Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. + * The display name of the AnnotationSpec represented by the color in the segmentation mask. */ - method?: string | null; + displayName?: string | null; /** - * Required. The harm block threshold. 
+ * The ID of the AnnotationSpec represented by the color in the segmentation mask. */ - threshold?: string | null; + id?: string | null; } /** - * Spec for safety metric. + * Annotation details specific to image object detection. */ - export interface Schema$GoogleCloudAiplatformV1beta1SafetySpec { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageBoundingBoxAnnotation { /** - * Optional. Which version to use for evaluation. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - version?: number | null; - } - /** - * Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SampleConfig { + annotationSpecId?: string | null; /** - * The percentage of data needed to be labeled in each following batch (except the first batch). + * The display name of the AnnotationSpec that this Annotation pertains to. */ - followingBatchSamplePercentage?: number | null; + displayName?: string | null; /** - * The percentage of data needed to be labeled in the first batch. + * The rightmost coordinate of the bounding box. */ - initialBatchSamplePercentage?: number | null; + xMax?: number | null; /** - * Field to choose sampling strategy. Sampling strategy will decide which data should be selected for human labeling in every batch. + * The leftmost coordinate of the bounding box. */ - sampleStrategy?: string | null; - } - /** - * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SampledShapleyAttribution { + xMin?: number | null; /** - * Required. The number of feature permutations to consider when approximating the Shapley values. 
Valid range of its value is [1, 50], inclusively. + * The bottommost coordinate of the bounding box. */ - pathCount?: number | null; + yMax?: number | null; + /** + * The topmost coordinate of the bounding box. + */ + yMin?: number | null; } /** - * Sampling Strategy for logging, can be for both training and prediction dataset. + * Annotation details specific to image classification. */ - export interface Schema$GoogleCloudAiplatformV1beta1SamplingStrategy { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageClassificationAnnotation { /** - * Random sample config. Will support more sampling strategies later. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - randomSampleConfig?: Schema$GoogleCloudAiplatformV1beta1SamplingStrategyRandomSampleConfig; + annotationSpecId?: string | null; + /** + * The display name of the AnnotationSpec that this Annotation pertains to. + */ + displayName?: string | null; } /** - * Requests are randomly selected. + * Payload of Image DataItem. */ - export interface Schema$GoogleCloudAiplatformV1beta1SamplingStrategyRandomSampleConfig { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageDataItem { /** - * Sample rate (0, 1] + * Required. Google Cloud Storage URI points to the original image in user's bucket. The image is up to 30MB in size. */ - sampleRate?: number | null; + gcsUri?: string | null; + /** + * Output only. The mime type of the content of the image. Only the images in below listed mime types are supported. - image/jpeg - image/gif - image/png - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon + */ + mimeType?: string | null; } /** - * A SavedQuery is a view of the dataset. It references a subset of annotations by problem type and filters. + * The metadata of Datasets that contain Image DataItems. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SavedQuery { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageDatasetMetadata { /** - * Output only. Filters on the Annotations in the dataset. + * Points to a YAML file stored on Google Cloud Storage describing payload of the Image DataItems that belong to this Dataset. */ - annotationFilter?: string | null; + dataItemSchemaUri?: string | null; /** - * Output only. Number of AnnotationSpecs in the context of the SavedQuery. + * Google Cloud Storage Bucket name that contains the blob data of this Dataset. */ - annotationSpecCount?: number | null; + gcsBucket?: string | null; + } + /** + * Annotation details specific to image segmentation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotation { /** - * Output only. Timestamp when this SavedQuery was created. + * Mask based segmentation annotation. Only one mask annotation can exist for one image. */ - createTime?: string | null; + maskAnnotation?: Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationMaskAnnotation; /** - * Required. The user-defined name of the SavedQuery. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Polygon annotation. */ - displayName?: string | null; + polygonAnnotation?: Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolygonAnnotation; /** - * Used to perform a consistent read-modify-write update. If not set, a blind "overwrite" update happens. + * Polyline annotation. */ - etag?: string | null; + polylineAnnotation?: Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolylineAnnotation; + } + /** + * The mask based segmentation annotation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationMaskAnnotation { /** - * Some additional information about the SavedQuery. + * The mapping between color and AnnotationSpec for this Annotation. 
*/ - metadata?: any | null; + annotationSpecColors?: Schema$GoogleCloudAiplatformV1beta1SchemaAnnotationSpecColor[]; /** - * Output only. Resource name of the SavedQuery. + * Google Cloud Storage URI that points to the mask image. The image must be in PNG format. It must have the same size as the DataItem's image. Each pixel in the image mask represents the AnnotationSpec which the pixel in the image DataItem belong to. Each color is mapped to one AnnotationSpec based on annotation_spec_colors. */ - name?: string | null; + maskGcsUri?: string | null; + } + /** + * Represents a polygon in image. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolygonAnnotation { /** - * Required. Problem type of the SavedQuery. Allowed values: * IMAGE_CLASSIFICATION_SINGLE_LABEL * IMAGE_CLASSIFICATION_MULTI_LABEL * IMAGE_BOUNDING_POLY * IMAGE_BOUNDING_BOX * TEXT_CLASSIFICATION_SINGLE_LABEL * TEXT_CLASSIFICATION_MULTI_LABEL * TEXT_EXTRACTION * TEXT_SENTIMENT * VIDEO_CLASSIFICATION * VIDEO_OBJECT_TRACKING + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - problemType?: string | null; + annotationSpecId?: string | null; /** - * Output only. If the Annotations belonging to the SavedQuery can be used for AutoML training. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - supportAutomlTraining?: boolean | null; + displayName?: string | null; /** - * Output only. Timestamp when SavedQuery was last updated. + * The vertexes are connected one by one and the last vertex is connected to the first one to represent a polygon. */ - updateTime?: string | null; + vertexes?: Schema$GoogleCloudAiplatformV1beta1SchemaVertex[]; } /** - * One point viewable on a scalar metric plot. + * Represents a polyline in image. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1Scalar { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolylineAnnotation { /** - * Value of the point at this step / timestamp. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - value?: number | null; + annotationSpecId?: string | null; + /** + * The display name of the AnnotationSpec that this Annotation pertains to. + */ + displayName?: string | null; + /** + * The vertexes are connected one by one and the last vertex in not connected to the first one. + */ + vertexes?: Schema$GoogleCloudAiplatformV1beta1SchemaVertex[]; } /** - * An instance of a Schedule periodically schedules runs to make API calls based on user specified time specification and API request type. + * Bounding box matching model metrics for a single intersection-over-union threshold and multiple label match confidence thresholds. */ - export interface Schema$GoogleCloudAiplatformV1beta1Schedule { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetrics { /** - * Optional. Whether new scheduled runs can be queued when max_concurrent_runs limit is reached. If set to true, new runs will be queued instead of skipped. Default to false. + * Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived from them. */ - allowQueueing?: boolean | null; + confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetricsConfidenceMetrics[]; /** - * Output only. Whether to backfill missed runs when the schedule is resumed from PAUSED state. If set to true, all missed runs will be scheduled. New runs will be scheduled after the backfill is complete. Default to false. + * The intersection-over-union threshold value used to compute this metrics entry. 
*/ - catchUp?: boolean | null; + iouThreshold?: number | null; /** - * Request for ModelMonitoringService.CreateModelMonitoringJob. + * The mean average precision, most often close to `auPrc`. */ - createModelMonitoringJobRequest?: Schema$GoogleCloudAiplatformV1beta1CreateModelMonitoringJobRequest; + meanAveragePrecision?: number | null; + } + /** + * Metrics for a single confidence threshold. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetricsConfidenceMetrics { /** - * Request for NotebookService.CreateNotebookExecutionJob. + * The confidence threshold value used to compute the metrics. */ - createNotebookExecutionJobRequest?: Schema$GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest; + confidenceThreshold?: number | null; /** - * Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project\}/locations/{location\}). + * The harmonic mean of recall and precision. */ - createPipelineJobRequest?: Schema$GoogleCloudAiplatformV1beta1CreatePipelineJobRequest; + f1Score?: number | null; /** - * Output only. Timestamp when this Schedule was created. + * Precision under the given confidence threshold. */ - createTime?: string | null; + precision?: number | null; /** - * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE\}" or "TZ=${IANA_TIME_ZONE\}". The ${IANA_TIME_ZONE\} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". + * Recall under the given confidence threshold. */ - cron?: string | null; + recall?: number | null; + } + /** + * Metrics for classification evaluation results. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsClassificationEvaluationMetrics { /** - * Required. 
User provided name of the Schedule. The name can be up to 128 characters long and can consist of any UTF-8 characters. + * The Area Under Precision-Recall Curve metric. Micro-averaged for the overall evaluation. */ - displayName?: string | null; + auPrc?: number | null; /** - * Optional. Timestamp after which no new runs can be scheduled. If specified, The schedule will be completed when either end_time is reached or when scheduled_run_count \>= max_run_count. If not specified, new runs will keep getting scheduled until this Schedule is paused or deleted. Already scheduled runs will be allowed to complete. Unset if not specified. + * The Area Under Receiver Operating Characteristic curve metric. Micro-averaged for the overall evaluation. */ - endTime?: string | null; + auRoc?: number | null; /** - * Output only. Timestamp when this Schedule was last paused. Unset if never paused. + * Metrics for each `confidenceThreshold` in 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and `positionThreshold` = INT32_MAX_VALUE. ROC and precision-recall curves, and other aggregated metrics are derived from them. The confidence metrics entries may also be supplied for additional values of `positionThreshold`, but from these no aggregated metrics are computed. */ - lastPauseTime?: string | null; + confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics[]; /** - * Output only. Timestamp when this Schedule was last resumed. Unset if never resumed from pause. + * Confusion matrix of the evaluation. */ - lastResumeTime?: string | null; + confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; /** - * Output only. Response of the last scheduled run. This is the response for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). Unset if no run has been scheduled yet. + * The Log Loss metric. 
*/ - lastScheduledRunResponse?: Schema$GoogleCloudAiplatformV1beta1ScheduleRunResponse; + logLoss?: number | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics { /** - * Required. Maximum number of runs that can be started concurrently for this Schedule. This is the limit for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). + * Metrics are computed with an assumption that the Model never returns predictions with score lower than this value. */ - maxConcurrentRunCount?: string | null; + confidenceThreshold?: number | null; /** - * Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count \>= max_run_count or when end_time is reached. If not specified, new runs will keep getting scheduled until this Schedule is paused or deleted. Already scheduled runs will be allowed to complete. Unset if not specified. + * Confusion matrix of the evaluation for this confidence_threshold. */ - maxRunCount?: string | null; + confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; /** - * Immutable. The resource name of the Schedule. + * The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. */ - name?: string | null; + f1Score?: number | null; /** - * Output only. Timestamp when this Schedule should schedule the next run. Having a next_run_time in the past means the runs are being started behind schedule. + * The harmonic mean of recallAt1 and precisionAt1. */ - nextRunTime?: string | null; + f1ScoreAt1?: number | null; /** - * Output only. The number of runs started by this schedule. + * Macro-averaged F1 Score. */ - startedRunCount?: string | null; + f1ScoreMacro?: number | null; /** - * Optional. Timestamp after which the first run can be scheduled. 
Default to Schedule create time if not specified. + * Micro-averaged F1 Score. */ - startTime?: string | null; + f1ScoreMicro?: number | null; /** - * Output only. The state of this Schedule. + * The number of ground truth labels that are not matched by a Model created label. */ - state?: string | null; + falseNegativeCount?: string | null; /** - * Output only. Timestamp when this Schedule was updated. + * The number of Model created labels that do not match a ground truth label. */ - updateTime?: string | null; - } - /** - * Status of a scheduled run. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ScheduleRunResponse { + falsePositiveCount?: string | null; /** - * The response of the scheduled run. + * False Positive Rate for the given confidence threshold. */ - runResponse?: string | null; + falsePositiveRate?: number | null; /** - * The scheduled run time based on the user-specified schedule. + * The False Positive Rate when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. */ - scheduledRunTime?: string | null; - } - /** - * All parameters related to queuing and scheduling of custom jobs. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Scheduling { + falsePositiveRateAt1?: number | null; /** - * Optional. Indicates if the job should retry for internal errors after the job starts running. If true, overrides `Scheduling.restart_job_on_worker_restart` to false. + * Metrics are computed with an assumption that the Model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet the `confidenceThreshold`. */ - disableRetries?: boolean | null; + maxPredictions?: number | null; /** - * Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. + * Precision for the given confidence threshold. 
*/ - restartJobOnWorkerRestart?: boolean | null; + precision?: number | null; /** - * The maximum job running time. The default is 7 days. + * The precision when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. */ - timeout?: string | null; - } - /** - * Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Schema { + precisionAt1?: number | null; /** - * Optional. Default value of the data. + * Recall (True Positive Rate) for the given confidence threshold. */ - default?: any | null; + recall?: number | null; /** - * Optional. The description of the data. + * The Recall (True Positive Rate) when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. */ - description?: string | null; + recallAt1?: number | null; /** - * Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]\} + * The number of labels that were not created by the Model, but if they would, they would not match a ground truth label. */ - enum?: string[] | null; + trueNegativeCount?: string | null; /** - * Optional. Example of the object. Will only populated when the object is the root. + * The number of Model created labels that match a ground truth label. */ - example?: any | null; + truePositiveCount?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix { /** - * Optional. The format of the data. 
Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + * AnnotationSpecs used in the confusion matrix. For AutoML Text Extraction, a special negative AnnotationSpec with empty `id` and `displayName` of "NULL" will be added as the last element. */ - format?: string | null; + annotationSpecs?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef[]; /** - * Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + * Rows in the confusion matrix. The number of rows is equal to the size of `annotationSpecs`. `rowsi` is the number of DataItems that have ground truth of the `annotationSpecs[i]` and are predicted as `annotationSpecs[j]` by the Model being evaluated. For Text Extraction, when `annotationSpecs[i]` is the last element in `annotationSpecs`, i.e. the special negative AnnotationSpec, `rowsi` is the number of predicted entities of `annoatationSpec[j]` that are not labeled as any of the ground truth AnnotationSpec. When annotationSpecs[j] is the special negative AnnotationSpec, `rowsi` is the number of entities have ground truth of `annotationSpec[i]` that are not predicted as an entity by the Model. The value of the last cell, i.e. `rowi` where i == j and `annotationSpec[i]` is the special negative AnnotationSpec, is always 0. */ - items?: Schema$GoogleCloudAiplatformV1beta1Schema; + rows?: any[][] | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef { /** - * Optional. Maximum value of the Type.INTEGER and Type.NUMBER + * Display name of the AnnotationSpec. */ - maximum?: number | null; + displayName?: string | null; /** - * Optional. Maximum number of the elements for Type.ARRAY. + * ID of the AnnotationSpec. */ - maxItems?: string | null; + id?: string | null; + } + /** + * Metrics for forecasting evaluation results. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsForecastingEvaluationMetrics { /** - * Optional. Maximum length of the Type.STRING + * Mean Absolute Error (MAE). */ - maxLength?: string | null; + meanAbsoluteError?: number | null; /** - * Optional. Maximum number of the properties for Type.OBJECT. + * Mean absolute percentage error. Infinity when there are zeros in the ground truth. */ - maxProperties?: string | null; + meanAbsolutePercentageError?: number | null; /** - * Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + * The quantile metrics entries for each quantile. */ - minimum?: number | null; + quantileMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsForecastingEvaluationMetricsQuantileMetricsEntry[]; /** - * Optional. Minimum number of the elements for Type.ARRAY. + * Root Mean Squared Error (RMSE). */ - minItems?: string | null; + rootMeanSquaredError?: number | null; /** - * Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + * Root mean squared log error. Undefined when there are negative ground truth values or predictions. */ - minLength?: string | null; + rootMeanSquaredLogError?: number | null; /** - * Optional. Minimum number of the properties for Type.OBJECT. + * Root Mean Square Percentage Error. Square root of MSPE. Undefined/imaginary when MSPE is negative. */ - minProperties?: string | null; + rootMeanSquaredPercentageError?: number | null; /** - * Optional. Indicates if the value may be null. + * Coefficient of determination as Pearson correlation coefficient. Undefined when ground truth or predictions are constant or near constant. */ - nullable?: boolean | null; + rSquared?: number | null; /** - * Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + * Weighted Absolute Percentage Error. Does not use weights, this is just what the metric is called. 
Undefined if actual values sum to zero. Will be very large if actual values sum to a very small number. */ - pattern?: string | null; + weightedAbsolutePercentageError?: number | null; + } + /** + * Entry for the Quantiles loss type optimization objective. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsForecastingEvaluationMetricsQuantileMetricsEntry { /** - * Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + * This is a custom metric that calculates the percentage of true values that were less than the predicted value for that quantile. Only populated when optimization_objective is minimize-quantile-loss and each entry corresponds to an entry in quantiles The percent value can be used to compare with the quantile value, which is the target value. */ - properties?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1Schema; - } | null; + observedQuantile?: number | null; /** - * Optional. Required properties of Type.OBJECT. + * The quantile for this entry. */ - required?: string[] | null; + quantile?: number | null; /** - * Optional. The title of the Schema. + * The scaled pinball loss of this quantile. */ - title?: string | null; + scaledPinballLoss?: number | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsGeneralTextGenerationEvaluationMetrics { /** - * Optional. The type of the data. + * BLEU (bilingual evaluation understudy) scores based on sacrebleu implementation. */ - type?: string | null; + bleu?: number | null; + /** + * ROUGE-L (Longest Common Subsequence) scoring at summary level. + */ + rougeLSum?: number | null; } /** - * An entry of mapping between color and AnnotationSpec. The mapping is used in segmentation mask. + * Metrics for image object detection evaluation results. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaAnnotationSpecColor { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageObjectDetectionEvaluationMetrics { /** - * The color of the AnnotationSpec in a segmentation mask. + * The single metric for bounding boxes evaluation: the `meanAveragePrecision` averaged over all `boundingBoxMetricsEntries`. */ - color?: Schema$GoogleTypeColor; + boundingBoxMeanAveragePrecision?: number | null; /** - * The display name of the AnnotationSpec represented by the color in the segmentation mask. + * The bounding boxes match metrics for each intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. */ - displayName?: string | null; + boundingBoxMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetrics[]; /** - * The ID of the AnnotationSpec represented by the color in the segmentation mask. + * The total number of bounding boxes (i.e. summed over all images) the ground truth used to create this evaluation had. */ - id?: string | null; + evaluatedBoundingBoxCount?: number | null; } /** - * Annotation details specific to image object detection. + * Metrics for image segmentation evaluation results. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageBoundingBoxAnnotation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageSegmentationEvaluationMetrics { /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. + * Metrics for each confidenceThreshold in 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 Precision-recall curve can be derived from it. 
*/ - annotationSpecId?: string | null; + confidenceMetricsEntries?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageSegmentationEvaluationMetricsConfidenceMetricsEntry[]; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageSegmentationEvaluationMetricsConfidenceMetricsEntry { /** - * The display name of the AnnotationSpec that this Annotation pertains to. + * Metrics are computed with an assumption that the model never returns predictions with score lower than this value. */ - displayName?: string | null; + confidenceThreshold?: number | null; /** - * The rightmost coordinate of the bounding box. + * Confusion matrix for the given confidence threshold. */ - xMax?: number | null; + confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; /** - * The leftmost coordinate of the bounding box. + * DSC or the F1 score, The harmonic mean of recall and precision. */ - xMin?: number | null; + diceScoreCoefficient?: number | null; /** - * The bottommost coordinate of the bounding box. + * The intersection-over-union score. The measure of overlap of the annotation's category mask with ground truth category mask on the DataItem. */ - yMax?: number | null; + iouScore?: number | null; /** - * The topmost coordinate of the bounding box. + * Precision for the given confidence threshold. */ - yMin?: number | null; + precision?: number | null; + /** + * Recall (True Positive Rate) for the given confidence threshold. + */ + recall?: number | null; } /** - * Annotation details specific to image classification. + * Metrics for general pairwise text generation evaluation results. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageClassificationAnnotation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics { /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. 
+ * Fraction of cases where the autorater agreed with the human raters. */ - annotationSpecId?: string | null; + accuracy?: number | null; /** - * The display name of the AnnotationSpec that this Annotation pertains to. + * Percentage of time the autorater decided the baseline model had the better response. */ - displayName?: string | null; - } - /** - * Payload of Image DataItem. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageDataItem { + baselineModelWinRate?: number | null; /** - * Required. Google Cloud Storage URI points to the original image in user's bucket. The image is up to 30MB in size. + * A measurement of agreement between the autorater and human raters that takes the likelihood of random agreement into account. */ - gcsUri?: string | null; + cohensKappa?: number | null; /** - * Output only. The mime type of the content of the image. Only the images in below listed mime types are supported. - image/jpeg - image/gif - image/png - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon + * Harmonic mean of precision and recall. */ - mimeType?: string | null; - } - /** - * The metadata of Datasets that contain Image DataItems. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageDatasetMetadata { + f1Score?: number | null; /** - * Points to a YAML file stored on Google Cloud Storage describing payload of the Image DataItems that belong to this Dataset. + * Number of examples where the autorater chose the baseline model, but humans preferred the model. */ - dataItemSchemaUri?: string | null; + falseNegativeCount?: string | null; /** - * Google Cloud Storage Bucket name that contains the blob data of this Dataset. + * Number of examples where the autorater chose the model, but humans preferred the baseline model. */ - gcsBucket?: string | null; - } - /** - * Annotation details specific to image segmentation. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotation { + falsePositiveCount?: string | null; /** - * Mask based segmentation annotation. Only one mask annotation can exist for one image. + * Percentage of time humans decided the baseline model had the better response. */ - maskAnnotation?: Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationMaskAnnotation; + humanPreferenceBaselineModelWinRate?: number | null; /** - * Polygon annotation. + * Percentage of time humans decided the model had the better response. */ - polygonAnnotation?: Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolygonAnnotation; + humanPreferenceModelWinRate?: number | null; /** - * Polyline annotation. + * Percentage of time the autorater decided the model had the better response. */ - polylineAnnotation?: Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolylineAnnotation; - } - /** - * The mask based segmentation annotation. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationMaskAnnotation { + modelWinRate?: number | null; /** - * The mapping between color and AnnotationSpec for this Annotation. + * Fraction of cases where the autorater and humans thought the model had a better response out of all cases where the autorater thought the model had a better response. True positive divided by all positive. */ - annotationSpecColors?: Schema$GoogleCloudAiplatformV1beta1SchemaAnnotationSpecColor[]; + precision?: number | null; /** - * Google Cloud Storage URI that points to the mask image. The image must be in PNG format. It must have the same size as the DataItem's image. Each pixel in the image mask represents the AnnotationSpec which the pixel in the image DataItem belong to. Each color is mapped to one AnnotationSpec based on annotation_spec_colors. 
+ * Fraction of cases where the autorater and humans thought the model had a better response out of all cases where the humans thought the model had a better response. */ - maskGcsUri?: string | null; - } - /** - * Represents a polygon in image. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolygonAnnotation { + recall?: number | null; /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. + * Number of examples where both the autorater and humans decided that the model had the worse response. */ - annotationSpecId?: string | null; + trueNegativeCount?: string | null; /** - * The display name of the AnnotationSpec that this Annotation pertains to. + * Number of examples where both the autorater and humans decided that the model had the better response. */ - displayName?: string | null; + truePositiveCount?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsQuestionAnsweringEvaluationMetrics { /** - * The vertexes are connected one by one and the last vertex is connected to the first one to represent a polygon. + * The rate at which the input predicted strings exactly match their references. */ - vertexes?: Schema$GoogleCloudAiplatformV1beta1SchemaVertex[]; + exactMatch?: number | null; } /** - * Represents a polyline in image. + * Metrics for regression evaluation results. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaImageSegmentationAnnotationPolylineAnnotation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsRegressionEvaluationMetrics { /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. + * Mean Absolute Error (MAE). */ - annotationSpecId?: string | null; + meanAbsoluteError?: number | null; /** - * The display name of the AnnotationSpec that this Annotation pertains to. + * Mean absolute percentage error. Infinity when there are zeros in the ground truth. 
*/ - displayName?: string | null; + meanAbsolutePercentageError?: number | null; /** - * The vertexes are connected one by one and the last vertex in not connected to the first one. + * Root Mean Squared Error (RMSE). */ - vertexes?: Schema$GoogleCloudAiplatformV1beta1SchemaVertex[]; - } - /** - * Bounding box matching model metrics for a single intersection-over-union threshold and multiple label match confidence thresholds. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetrics { + rootMeanSquaredError?: number | null; /** - * Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived from them. + * Root mean squared log error. Undefined when there are negative ground truth values or predictions. */ - confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetricsConfidenceMetrics[]; + rootMeanSquaredLogError?: number | null; /** - * The intersection-over-union threshold value used to compute this metrics entry. + * Coefficient of determination as Pearson correlation coefficient. Undefined when ground truth or predictions are constant or near constant. */ - iouThreshold?: number | null; + rSquared?: number | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsSummarizationEvaluationMetrics { /** - * The mean average precision, most often close to `auPrc`. + * ROUGE-L (Longest Common Subsequence) scoring at summary level. */ - meanAveragePrecision?: number | null; + rougeLSum?: number | null; } /** - * Metrics for a single confidence threshold. + * Metrics for text extraction evaluation results. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetricsConfidenceMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextExtractionEvaluationMetrics { /** - * The confidence threshold value used to compute the metrics. + * Metrics that have confidence thresholds. Precision-recall curve can be derived from them. + */ + confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextExtractionEvaluationMetricsConfidenceMetrics[]; + /** + * Confusion matrix of the evaluation. Only set for Models where number of AnnotationSpecs is no more than 10. Only set for ModelEvaluations, not for ModelEvaluationSlices. + */ + confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextExtractionEvaluationMetricsConfidenceMetrics { + /** + * Metrics are computed with an assumption that the Model never returns predictions with score lower than this value. */ confidenceThreshold?: number | null; /** @@ -12534,1405 +12417,1343 @@ export namespace aiplatform_v1beta1 { */ f1Score?: number | null; /** - * Precision under the given confidence threshold. + * Precision for the given confidence threshold. */ precision?: number | null; /** - * Recall under the given confidence threshold. + * Recall (True Positive Rate) for the given confidence threshold. */ recall?: number | null; } /** - * Metrics for classification evaluation results. + * Model evaluation metrics for text sentiment problems. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsClassificationEvaluationMetrics { - /** - * The Area Under Precision-Recall Curve metric. Micro-averaged for the overall evaluation. 
- */ - auPrc?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextSentimentEvaluationMetrics { /** - * The Area Under Receiver Operating Characteristic curve metric. Micro-averaged for the overall evaluation. + * Confusion matrix of the evaluation. Only set for ModelEvaluations, not for ModelEvaluationSlices. */ - auRoc?: number | null; + confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; /** - * Metrics for each `confidenceThreshold` in 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and `positionThreshold` = INT32_MAX_VALUE. ROC and precision-recall curves, and other aggregated metrics are derived from them. The confidence metrics entries may also be supplied for additional values of `positionThreshold`, but from these no aggregated metrics are computed. + * The harmonic mean of recall and precision. */ - confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics[]; + f1Score?: number | null; /** - * Confusion matrix of the evaluation. + * Linear weighted kappa. Only set for ModelEvaluations, not for ModelEvaluationSlices. */ - confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; + linearKappa?: number | null; /** - * The Log Loss metric. + * Mean absolute error. Only set for ModelEvaluations, not for ModelEvaluationSlices. */ - logLoss?: number | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics { + meanAbsoluteError?: number | null; /** - * Metrics are computed with an assumption that the Model never returns predictions with score lower than this value. + * Mean squared error. Only set for ModelEvaluations, not for ModelEvaluationSlices. 
*/ - confidenceThreshold?: number | null; + meanSquaredError?: number | null; /** - * Confusion matrix of the evaluation for this confidence_threshold. + * Precision. */ - confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; + precision?: number | null; /** - * The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. + * Quadratic weighted kappa. Only set for ModelEvaluations, not for ModelEvaluationSlices. */ - f1Score?: number | null; + quadraticKappa?: number | null; /** - * The harmonic mean of recallAt1 and precisionAt1. + * Recall. */ - f1ScoreAt1?: number | null; + recall?: number | null; + } + /** + * UNIMPLEMENTED. Track matching model metrics for a single track match threshold and multiple label match confidence thresholds. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetrics { /** - * Macro-averaged F1 Score. + * Metrics for each label-match `confidenceThreshold` from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived from them. */ - f1ScoreMacro?: number | null; + confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetricsConfidenceMetrics[]; /** - * Micro-averaged F1 Score. + * The intersection-over-union threshold value between bounding boxes across frames used to compute this metric entry. */ - f1ScoreMicro?: number | null; + iouThreshold?: number | null; /** - * The number of ground truth labels that are not matched by a Model created label. + * The mean bounding box iou over all confidence thresholds. */ - falseNegativeCount?: string | null; + meanBoundingBoxIou?: number | null; /** - * The number of Model created labels that do not match a ground truth label. + * The mean mismatch rate over all confidence thresholds. 
*/ - falsePositiveCount?: string | null; + meanMismatchRate?: number | null; /** - * False Positive Rate for the given confidence threshold. + * The mean average precision over all confidence thresholds. */ - falsePositiveRate?: number | null; + meanTrackingAveragePrecision?: number | null; + } + /** + * Metrics for a single confidence threshold. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetricsConfidenceMetrics { /** - * The False Positive Rate when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. + * Bounding box intersection-over-union precision. Measures how well the bounding boxes overlap between each other (e.g. complete overlap or just barely above iou_threshold). */ - falsePositiveRateAt1?: number | null; + boundingBoxIou?: number | null; /** - * Metrics are computed with an assumption that the Model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet the `confidenceThreshold`. + * The confidence threshold value used to compute the metrics. */ - maxPredictions?: number | null; + confidenceThreshold?: number | null; /** - * Precision for the given confidence threshold. + * Mismatch rate, which measures the tracking consistency, i.e. correctness of instance ID continuity. */ - precision?: number | null; + mismatchRate?: number | null; /** - * The precision when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. + * Tracking precision. */ - precisionAt1?: number | null; + trackingPrecision?: number | null; /** - * Recall (True Positive Rate) for the given confidence threshold. + * Tracking recall. */ - recall?: number | null; + trackingRecall?: number | null; + } + /** + * The Evaluation metrics given a specific precision_window_length. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetrics { /** - * The Recall (True Positive Rate) when only considering the label that has the highest prediction score and not below the confidence threshold for each DataItem. + * Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ - recallAt1?: number | null; + confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetricsConfidenceMetrics[]; /** - * The number of labels that were not created by the Model, but if they would, they would not match a ground truth label. + * The mean average precision. */ - trueNegativeCount?: string | null; + meanAveragePrecision?: number | null; /** - * The number of Model created labels that match a ground truth label. + * This VideoActionMetrics is calculated based on this prediction window length. If the predicted action's timestamp is inside the time window whose center is the ground truth action's timestamp with this specific length, the prediction result is treated as a true positive. */ - truePositiveCount?: string | null; + precisionWindowLength?: string | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix { + /** + * Metrics for a single confidence threshold. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetricsConfidenceMetrics { /** - * AnnotationSpecs used in the confusion matrix. For AutoML Text Extraction, a special negative AnnotationSpec with empty `id` and `displayName` of "NULL" will be added as the last element. + * Output only. The confidence threshold value used to compute the metrics. */ - annotationSpecs?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef[]; + confidenceThreshold?: number | null; /** - * Rows in the confusion matrix. 
The number of rows is equal to the size of `annotationSpecs`. `rowsi` is the number of DataItems that have ground truth of the `annotationSpecs[i]` and are predicted as `annotationSpecs[j]` by the Model being evaluated. For Text Extraction, when `annotationSpecs[i]` is the last element in `annotationSpecs`, i.e. the special negative AnnotationSpec, `rowsi` is the number of predicted entities of `annoatationSpec[j]` that are not labeled as any of the ground truth AnnotationSpec. When annotationSpecs[j] is the special negative AnnotationSpec, `rowsi` is the number of entities have ground truth of `annotationSpec[i]` that are not predicted as an entity by the Model. The value of the last cell, i.e. `rowi` where i == j and `annotationSpec[i]` is the special negative AnnotationSpec, is always 0. + * Output only. The harmonic mean of recall and precision. */ - rows?: any[][] | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef { + f1Score?: number | null; /** - * Display name of the AnnotationSpec. + * Output only. Precision for the given confidence threshold. */ - displayName?: string | null; + precision?: number | null; /** - * ID of the AnnotationSpec. + * Output only. Recall for the given confidence threshold. */ - id?: string | null; + recall?: number | null; } /** - * Metrics for forecasting evaluation results. + * Model evaluation metrics for video action recognition. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsForecastingEvaluationMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionRecognitionMetrics { /** - * Mean Absolute Error (MAE). + * The number of ground truth actions used to create this evaluation. */ - meanAbsoluteError?: number | null; + evaluatedActionCount?: number | null; /** - * Mean absolute percentage error. Infinity when there are zeros in the ground truth. 
+ * The metric entries for precision window lengths: 1s,2s,3s. */ - meanAbsolutePercentageError?: number | null; + videoActionMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetrics[]; + } + /** + * Model evaluation metrics for video object tracking problems. Evaluates prediction quality of both labeled bounding boxes and labeled tracks (i.e. series of bounding boxes sharing same label and instance ID). + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoObjectTrackingMetrics { /** - * The quantile metrics entries for each quantile. + * The single metric for bounding boxes evaluation: the `meanAveragePrecision` averaged over all `boundingBoxMetrics`. */ - quantileMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsForecastingEvaluationMetricsQuantileMetricsEntry[]; + boundingBoxMeanAveragePrecision?: number | null; /** - * Root Mean Squared Error (RMSE). + * The bounding boxes match metrics for each intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. */ - rootMeanSquaredError?: number | null; + boundingBoxMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetrics[]; /** - * Root mean squared log error. Undefined when there are negative ground truth values or predictions. + * UNIMPLEMENTED. The total number of bounding boxes (i.e. summed over all frames) the ground truth used to create this evaluation had. */ - rootMeanSquaredLogError?: number | null; + evaluatedBoundingBoxCount?: number | null; /** - * Root Mean Square Percentage Error. Square root of MSPE. Undefined/imaginary when MSPE is negative. + * UNIMPLEMENTED. The number of video frames used to create this evaluation. */ - rootMeanSquaredPercentageError?: number | null; + evaluatedFrameCount?: number | null; /** - * Coefficient of determination as Pearson correlation coefficient. 
Undefined when ground truth or predictions are constant or near constant. + * UNIMPLEMENTED. The total number of tracks (i.e. as seen across all frames) the ground truth used to create this evaluation had. */ - rSquared?: number | null; + evaluatedTrackCount?: number | null; /** - * Weighted Absolute Percentage Error. Does not use weights, this is just what the metric is called. Undefined if actual values sum to zero. Will be very large if actual values sum to a very small number. + * UNIMPLEMENTED. The single metric for tracks accuracy evaluation: the `meanAveragePrecision` averaged over all `trackMetrics`. */ - weightedAbsolutePercentageError?: number | null; - } - /** - * Entry for the Quantiles loss type optimization objective. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsForecastingEvaluationMetricsQuantileMetricsEntry { + trackMeanAveragePrecision?: number | null; /** - * This is a custom metric that calculates the percentage of true values that were less than the predicted value for that quantile. Only populated when optimization_objective is minimize-quantile-loss and each entry corresponds to an entry in quantiles The percent value can be used to compare with the quantile value, which is the target value. + * UNIMPLEMENTED. The single metric for tracks bounding box iou evaluation: the `meanBoundingBoxIou` averaged over all `trackMetrics`. */ - observedQuantile?: number | null; + trackMeanBoundingBoxIou?: number | null; /** - * The quantile for this entry. + * UNIMPLEMENTED. The single metric for tracking consistency evaluation: the `meanMismatchRate` averaged over all `trackMetrics`. */ - quantile?: number | null; + trackMeanMismatchRate?: number | null; /** - * The scaled pinball loss of this quantile. + * UNIMPLEMENTED. The tracks match metrics for each intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. 
*/ - scaledPinballLoss?: number | null; + trackMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetrics[]; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsGeneralTextGenerationEvaluationMetrics { + /** + * Prediction input format for Image Classification. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceImageClassificationPredictionInstance { /** - * BLEU (bilingual evaluation understudy) scores based on sacrebleu implementation. + * The image bytes or Cloud Storage URI to make the prediction on. */ - bleu?: number | null; + content?: string | null; /** - * ROUGE-L (Longest Common Subsequence) scoring at summary level. + * The MIME type of the content of the image. Only the images in below listed MIME types are supported. - image/jpeg - image/gif - image/png - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon */ - rougeLSum?: number | null; + mimeType?: string | null; } /** - * Metrics for image object detection evaluation results. + * Prediction input format for Image Object Detection. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageObjectDetectionEvaluationMetrics { - /** - * The single metric for bounding boxes evaluation: the `meanAveragePrecision` averaged over all `boundingBoxMetricsEntries`. - */ - boundingBoxMeanAveragePrecision?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceImageObjectDetectionPredictionInstance { /** - * The bounding boxes match metrics for each intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + * The image bytes or Cloud Storage URI to make the prediction on. */ - boundingBoxMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetrics[]; + content?: string | null; /** - * The total number of bounding boxes (i.e. 
summed over all images) the ground truth used to create this evaluation had. + * The MIME type of the content of the image. Only the images in below listed MIME types are supported. - image/jpeg - image/gif - image/png - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon */ - evaluatedBoundingBoxCount?: number | null; + mimeType?: string | null; } /** - * Metrics for image segmentation evaluation results. + * Prediction input format for Image Segmentation. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageSegmentationEvaluationMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceImageSegmentationPredictionInstance { /** - * Metrics for each confidenceThreshold in 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 Precision-recall curve can be derived from it. + * The image bytes to make the predictions on. */ - confidenceMetricsEntries?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageSegmentationEvaluationMetricsConfidenceMetricsEntry[]; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsImageSegmentationEvaluationMetricsConfidenceMetricsEntry { + content?: string | null; /** - * Metrics are computed with an assumption that the model never returns predictions with score lower than this value. + * The MIME type of the content of the image. Only the images in below listed MIME types are supported. - image/jpeg - image/png */ - confidenceThreshold?: number | null; + mimeType?: string | null; + } + /** + * Prediction input format for Text Classification. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceTextClassificationPredictionInstance { /** - * Confusion matrix for the given confidence threshold. + * The text snippet to make the predictions on. 
*/ - confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; + content?: string | null; /** - * DSC or the F1 score, The harmonic mean of recall and precision. + * The MIME type of the text snippet. The supported MIME types are listed below. - text/plain */ - diceScoreCoefficient?: number | null; + mimeType?: string | null; + } + /** + * Prediction input format for Text Extraction. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceTextExtractionPredictionInstance { /** - * The intersection-over-union score. The measure of overlap of the annotation's category mask with ground truth category mask on the DataItem. + * The text snippet to make the predictions on. */ - iouScore?: number | null; + content?: string | null; /** - * Precision for the given confidence threshold. + * This field is only used for batch prediction. If a key is provided, the batch prediction result will by mapped to this key. If omitted, then the batch prediction result will contain the entire input instance. Vertex AI will not check if keys in the request are duplicates, so it is up to the caller to ensure the keys are unique. */ - precision?: number | null; + key?: string | null; /** - * Recall (True Positive Rate) for the given confidence threshold. + * The MIME type of the text snippet. The supported MIME types are listed below. - text/plain */ - recall?: number | null; + mimeType?: string | null; } /** - * Metrics for general pairwise text generation evaluation results. + * Prediction input format for Text Sentiment. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceTextSentimentPredictionInstance { /** - * Fraction of cases where the autorater agreed with the human raters. + * The text snippet to make the predictions on. 
*/ - accuracy?: number | null; + content?: string | null; /** - * Percentage of time the autorater decided the baseline model had the better response. + * The MIME type of the text snippet. The supported MIME types are listed below. - text/plain */ - baselineModelWinRate?: number | null; + mimeType?: string | null; + } + /** + * Prediction input format for Video Action Recognition. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceVideoActionRecognitionPredictionInstance { /** - * A measurement of agreement between the autorater and human raters that takes the likelihood of random agreement into account. + * The Google Cloud Storage location of the video on which to perform the prediction. */ - cohensKappa?: number | null; + content?: string | null; /** - * Harmonic mean of precision and recall. + * The MIME type of the content of the video. Only the following are supported: video/mp4 video/avi video/quicktime */ - f1Score?: number | null; + mimeType?: string | null; /** - * Number of examples where the autorater chose the baseline model, but humans preferred the model. + * The end, exclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision, and "inf" or "Infinity" is allowed, which means the end of the video. */ - falseNegativeCount?: string | null; + timeSegmentEnd?: string | null; /** - * Number of examples where the autorater chose the model, but humans preferred the baseline model. + * The beginning, inclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision. 
*/ - falsePositiveCount?: string | null; + timeSegmentStart?: string | null; + } + /** + * Prediction input format for Video Classification. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceVideoClassificationPredictionInstance { /** - * Percentage of time humans decided the baseline model had the better response. + * The Google Cloud Storage location of the video on which to perform the prediction. */ - humanPreferenceBaselineModelWinRate?: number | null; + content?: string | null; /** - * Percentage of time humans decided the model had the better response. + * The MIME type of the content of the video. Only the following are supported: video/mp4 video/avi video/quicktime */ - humanPreferenceModelWinRate?: number | null; + mimeType?: string | null; /** - * Percentage of time the autorater decided the model had the better response. + * The end, exclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision, and "inf" or "Infinity" is allowed, which means the end of the video. */ - modelWinRate?: number | null; + timeSegmentEnd?: string | null; /** - * Fraction of cases where the autorater and humans thought the model had a better response out of all cases where the autorater thought the model had a better response. True positive divided by all positive. + * The beginning, inclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision. */ - precision?: number | null; + timeSegmentStart?: string | null; + } + /** + * Prediction input format for Video Object Tracking. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceVideoObjectTrackingPredictionInstance { /** - * Fraction of cases where the autorater and humans thought the model had a better response out of all cases where the humans thought the model had a better response. + * The Google Cloud Storage location of the video on which to perform the prediction. */ - recall?: number | null; + content?: string | null; /** - * Number of examples where both the autorater and humans decided that the model had the worse response. + * The MIME type of the content of the video. Only the following are supported: video/mp4 video/avi video/quicktime */ - trueNegativeCount?: string | null; + mimeType?: string | null; /** - * Number of examples where both the autorater and humans decided that the model had the better response. + * The end, exclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision, and "inf" or "Infinity" is allowed, which means the end of the video. */ - truePositiveCount?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsQuestionAnsweringEvaluationMetrics { + timeSegmentEnd?: string | null; /** - * The rate at which the input predicted strings exactly match their references. + * The beginning, inclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision. */ - exactMatch?: number | null; + timeSegmentStart?: string | null; } /** - * Metrics for regression evaluation results. + * Represents a line of JSONL in the batch prediction output file. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsRegressionEvaluationMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictionResult { /** - * Mean Absolute Error (MAE). + * The error result. Do not set prediction if this is set. */ - meanAbsoluteError?: number | null; + error?: Schema$GoogleCloudAiplatformV1beta1SchemaPredictionResultError; /** - * Mean absolute percentage error. Infinity when there are zeros in the ground truth. + * User's input instance. Struct is used here instead of Any so that JsonFormat does not append an extra "@type" field when we convert the proto to JSON. */ - meanAbsolutePercentageError?: number | null; + instance?: {[key: string]: any} | null; /** - * Root Mean Squared Error (RMSE). + * Optional user-provided key from the input instance. */ - rootMeanSquaredError?: number | null; + key?: string | null; /** - * Root mean squared log error. Undefined when there are negative ground truth values or predictions. + * The prediction result. Value is used here instead of Any so that JsonFormat does not append an extra "@type" field when we convert the proto to JSON and so we can represent array of objects. Do not set error if this is set. */ - rootMeanSquaredLogError?: number | null; + prediction?: any | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictionResultError { /** - * Coefficient of determination as Pearson correlation coefficient. Undefined when ground truth or predictions are constant or near constant. + * Error message with additional details. */ - rSquared?: number | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsSummarizationEvaluationMetrics { + message?: string | null; /** - * ROUGE-L (Longest Common Subsequence) scoring at summary level. + * Error status. This will be serialized into the enum name e.g. "NOT_FOUND". 
*/ - rougeLSum?: number | null; + status?: string | null; } /** - * Metrics for text extraction evaluation results. + * Prediction model parameters for Image Classification. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextExtractionEvaluationMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsImageClassificationPredictionParams { /** - * Metrics that have confidence thresholds. Precision-recall curve can be derived from them. + * The Model only returns predictions with at least this confidence score. Default value is 0.0 */ - confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextExtractionEvaluationMetricsConfidenceMetrics[]; + confidenceThreshold?: number | null; /** - * Confusion matrix of the evaluation. Only set for Models where number of AnnotationSpecs is no more than 10. Only set for ModelEvaluations, not for ModelEvaluationSlices. + * The Model only returns up to that many top, by confidence score, predictions per instance. If this number is very high, the Model may return fewer predictions. Default value is 10. */ - confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; + maxPredictions?: number | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextExtractionEvaluationMetricsConfidenceMetrics { + /** + * Prediction model parameters for Image Object Detection. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsImageObjectDetectionPredictionParams { /** - * Metrics are computed with an assumption that the Model never returns predictions with score lower than this value. + * The Model only returns predictions with at least this confidence score. Default value is 0.0 */ confidenceThreshold?: number | null; /** - * The harmonic mean of recall and precision. - */ - f1Score?: number | null; - /** - * Precision for the given confidence threshold. 
- */ - precision?: number | null; - /** - * Recall (True Positive Rate) for the given confidence threshold. + * The Model only returns up to that many top, by confidence score, predictions per instance. Note that number of returned predictions is also limited by metadata's predictionsLimit. Default value is 10. */ - recall?: number | null; + maxPredictions?: number | null; } /** - * Model evaluation metrics for text sentiment problems. + * Prediction model parameters for Image Segmentation. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTextSentimentEvaluationMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsImageSegmentationPredictionParams { /** - * Confusion matrix of the evaluation. Only set for ModelEvaluations, not for ModelEvaluationSlices. + * When the model predicts category of pixels of the image, it will only provide predictions for pixels that it is at least this much confident about. All other pixels will be classified as background. Default value is 0.5. */ - confusionMatrix?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsConfusionMatrix; + confidenceThreshold?: number | null; + } + /** + * Prediction model parameters for Video Action Recognition. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsVideoActionRecognitionPredictionParams { /** - * The harmonic mean of recall and precision. + * The Model only returns predictions with at least this confidence score. Default value is 0.0 */ - f1Score?: number | null; + confidenceThreshold?: number | null; /** - * Linear weighted kappa. Only set for ModelEvaluations, not for ModelEvaluationSlices. + * The model only returns up to that many top, by confidence score, predictions per frame of the video. If this number is very high, the Model may return fewer predictions per frame. Default value is 50. */ - linearKappa?: number | null; - /** - * Mean absolute error. 
Only set for ModelEvaluations, not for ModelEvaluationSlices. - */ - meanAbsoluteError?: number | null; - /** - * Mean squared error. Only set for ModelEvaluations, not for ModelEvaluationSlices. - */ - meanSquaredError?: number | null; - /** - * Precision. - */ - precision?: number | null; - /** - * Quadratic weighted kappa. Only set for ModelEvaluations, not for ModelEvaluationSlices. - */ - quadraticKappa?: number | null; - /** - * Recall. - */ - recall?: number | null; + maxPredictions?: number | null; } /** - * UNIMPLEMENTED. Track matching model metrics for a single track match threshold and multiple label match confidence thresholds. + * Prediction model parameters for Video Classification. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsVideoClassificationPredictionParams { /** - * Metrics for each label-match `confidenceThreshold` from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived from them. + * The Model only returns predictions with at least this confidence score. Default value is 0.0 */ - confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetricsConfidenceMetrics[]; + confidenceThreshold?: number | null; /** - * The intersection-over-union threshold value between bounding boxes across frames used to compute this metric entry. + * The Model only returns up to that many top, by confidence score, predictions per instance. If this number is very high, the Model may return fewer predictions. Default value is 10,000. */ - iouThreshold?: number | null; + maxPredictions?: number | null; /** - * The mean bounding box iou over all confidence thresholds. + * Set to true to request classification for a video at one-second intervals. 
Vertex AI returns labels and their confidence scores for each second of the entire time segment of the video that user specified in the input WARNING: Model evaluation is not done for this classification type, the quality of it depends on the training data, but there are no metrics provided to describe that quality. Default value is false */ - meanBoundingBoxIou?: number | null; + oneSecIntervalClassification?: boolean | null; /** - * The mean mismatch rate over all confidence thresholds. + * Set to true to request segment-level classification. Vertex AI returns labels and their confidence scores for the entire time segment of the video that user specified in the input instance. Default value is true */ - meanMismatchRate?: number | null; + segmentClassification?: boolean | null; /** - * The mean average precision over all confidence thresholds. + * Set to true to request shot-level classification. Vertex AI determines the boundaries for each camera shot in the entire time segment of the video that user specified in the input instance. Vertex AI then returns labels and their confidence scores for each detected shot, along with the start and end time of the shot. WARNING: Model evaluation is not done for this classification type, the quality of it depends on the training data, but there are no metrics provided to describe that quality. Default value is false */ - meanTrackingAveragePrecision?: number | null; + shotClassification?: boolean | null; } /** - * Metrics for a single confidence threshold. + * Prediction model parameters for Video Object Tracking. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetricsConfidenceMetrics { - /** - * Bounding box intersection-over-union precision. Measures how well the bounding boxes overlap between each other (e.g. complete overlap or just barely above iou_threshold). 
- */ - boundingBoxIou?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsVideoObjectTrackingPredictionParams { /** - * The confidence threshold value used to compute the metrics. + * The Model only returns predictions with at least this confidence score. Default value is 0.0 */ confidenceThreshold?: number | null; /** - * Mismatch rate, which measures the tracking consistency, i.e. correctness of instance ID continuity. - */ - mismatchRate?: number | null; - /** - * Tracking precision. + * The model only returns up to that many top, by confidence score, predictions per frame of the video. If this number is very high, the Model may return fewer predictions per frame. Default value is 50. */ - trackingPrecision?: number | null; + maxPredictions?: number | null; /** - * Tracking recall. + * Only bounding boxes with shortest edge at least that long as a relative value of video frame size are returned. Default value is 0.0. */ - trackingRecall?: number | null; + minBoundingBoxSize?: number | null; } /** - * The Evaluation metrics given a specific precision_window_length. + * Prediction output format for Image and Text Classification. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionClassificationPredictionResult { /** - * Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. + * The Model's confidences in correctness of the predicted IDs, higher value means higher confidence. Order matches the Ids. */ - confidenceMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetricsConfidenceMetrics[]; + confidences?: number[] | null; /** - * The mean average precision. + * The display names of the AnnotationSpecs that had been identified, order matches the IDs. 
*/ - meanAveragePrecision?: number | null; + displayNames?: string[] | null; /** - * This VideoActionMetrics is calculated based on this prediction window length. If the predicted action's timestamp is inside the time window whose center is the ground truth action's timestamp with this specific length, the prediction result is treated as a true positive. + * The resource IDs of the AnnotationSpecs that had been identified. */ - precisionWindowLength?: string | null; + ids?: string[] | null; } /** - * Metrics for a single confidence threshold. + * Prediction output format for Image Object Detection. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetricsConfidenceMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionImageObjectDetectionPredictionResult { /** - * Output only. The confidence threshold value used to compute the metrics. + * Bounding boxes, i.e. the rectangles over the image, that pinpoint the found AnnotationSpecs. Given in order that matches the IDs. Each bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and `yMax`, which represent the extremal coordinates of the box. They are relative to the image size, and the point 0,0 is in the top left of the image. */ - confidenceThreshold?: number | null; + bboxes?: any[][] | null; /** - * Output only. The harmonic mean of recall and precision. + * The Model's confidences in correctness of the predicted IDs, higher value means higher confidence. Order matches the Ids. */ - f1Score?: number | null; + confidences?: number[] | null; /** - * Output only. Precision for the given confidence threshold. + * The display names of the AnnotationSpecs that had been identified, order matches the IDs. */ - precision?: number | null; + displayNames?: string[] | null; /** - * Output only. Recall for the given confidence threshold. 
+ * The resource IDs of the AnnotationSpecs that had been identified, ordered by the confidence score descendingly. */ - recall?: number | null; + ids?: string[] | null; } /** - * Model evaluation metrics for video action recognition. + * Prediction output format for Image Segmentation. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionRecognitionMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionImageSegmentationPredictionResult { /** - * The number of ground truth actions used to create this evaluation. + * A PNG image where each pixel in the mask represents the category in which the pixel in the original image was predicted to belong to. The size of this image will be the same as the original image. The mapping between the AnntoationSpec and the color can be found in model's metadata. The model will choose the most likely category and if none of the categories reach the confidence threshold, the pixel will be marked as background. */ - evaluatedActionCount?: number | null; + categoryMask?: string | null; /** - * The metric entries for precision window lengths: 1s,2s,3s. + * A one channel image which is encoded as an 8bit lossless PNG. The size of the image will be the same as the original image. For a specific pixel, darker color means less confidence in correctness of the cateogry in the categoryMask for the corresponding pixel. Black means no confidence and white means complete confidence. */ - videoActionMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoActionMetrics[]; + confidenceMask?: string | null; } /** - * Model evaluation metrics for video object tracking problems. Evaluates prediction quality of both labeled bounding boxes and labeled tracks (i.e. series of bounding boxes sharing same label and instance ID). + * Prediction output format for Tabular Classification. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsVideoObjectTrackingMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTabularClassificationPredictionResult { /** - * The single metric for bounding boxes evaluation: the `meanAveragePrecision` averaged over all `boundingBoxMetrics`. + * The name of the classes being classified, contains all possible values of the target column. */ - boundingBoxMeanAveragePrecision?: number | null; + classes?: string[] | null; /** - * The bounding boxes match metrics for each intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + * The model's confidence in each class being correct, higher value means higher confidence. The N-th score corresponds to the N-th class in classes. */ - boundingBoxMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsBoundingBoxMetrics[]; + scores?: number[] | null; + } + /** + * Prediction output format for Tabular Regression. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTabularRegressionPredictionResult { /** - * UNIMPLEMENTED. The total number of bounding boxes (i.e. summed over all frames) the ground truth used to create this evaluation had. + * The lower bound of the prediction interval. */ - evaluatedBoundingBoxCount?: number | null; + lowerBound?: number | null; /** - * UNIMPLEMENTED. The number of video frames used to create this evaluation. + * Quantile predictions, in 1-1 correspondence with quantile_values. */ - evaluatedFrameCount?: number | null; + quantilePredictions?: number[] | null; /** - * UNIMPLEMENTED. The total number of tracks (i.e. as seen across all frames) the ground truth used to create this evaluation had. + * Quantile values. */ - evaluatedTrackCount?: number | null; + quantileValues?: number[] | null; /** - * UNIMPLEMENTED. 
The single metric for tracks accuracy evaluation: the `meanAveragePrecision` averaged over all `trackMetrics`. + * The upper bound of the prediction interval. */ - trackMeanAveragePrecision?: number | null; + upperBound?: number | null; /** - * UNIMPLEMENTED. The single metric for tracks bounding box iou evaluation: the `meanBoundingBoxIou` averaged over all `trackMetrics`. + * The regression value. */ - trackMeanBoundingBoxIou?: number | null; + value?: number | null; + } + /** + * Prediction output format for Text Extraction. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTextExtractionPredictionResult { /** - * UNIMPLEMENTED. The single metric for tracking consistency evaluation: the `meanMismatchRate` averaged over all `trackMetrics`. + * The Model's confidences in correctness of the predicted IDs, higher value means higher confidence. Order matches the Ids. */ - trackMeanMismatchRate?: number | null; + confidences?: number[] | null; /** - * UNIMPLEMENTED. The tracks match metrics for each intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + * The display names of the AnnotationSpecs that had been identified, order matches the IDs. */ - trackMetrics?: Schema$GoogleCloudAiplatformV1beta1SchemaModelevaluationMetricsTrackMetrics[]; - } - /** - * Prediction input format for Image Classification. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceImageClassificationPredictionInstance { + displayNames?: string[] | null; /** - * The image bytes or Cloud Storage URI to make the prediction on. + * The resource IDs of the AnnotationSpecs that had been identified, ordered by the confidence score descendingly. */ - content?: string | null; + ids?: string[] | null; /** - * The MIME type of the content of the image. Only the images in below listed MIME types are supported. 
- image/jpeg - image/gif - image/png - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon + * The end offsets, inclusive, of the text segment in which the AnnotationSpec has been identified. Expressed as a zero-based number of characters as measured from the start of the text snippet. */ - mimeType?: string | null; + textSegmentEndOffsets?: string[] | null; + /** + * The start offsets, inclusive, of the text segment in which the AnnotationSpec has been identified. Expressed as a zero-based number of characters as measured from the start of the text snippet. + */ + textSegmentStartOffsets?: string[] | null; } /** - * Prediction input format for Image Object Detection. + * Prediction output format for Text Sentiment */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceImageObjectDetectionPredictionInstance { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTextSentimentPredictionResult { /** - * The image bytes or Cloud Storage URI to make the prediction on. + * The integer sentiment labels between 0 (inclusive) and sentimentMax label (inclusive), while 0 maps to the least positive sentiment and sentimentMax maps to the most positive one. The higher the score is, the more positive the sentiment in the text snippet is. Note: sentimentMax is an integer value between 1 (inclusive) and 10 (inclusive). */ - content?: string | null; + sentiment?: number | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTftFeatureImportance { + attributeColumns?: string[] | null; + attributeWeights?: number[] | null; + contextColumns?: string[] | null; /** - * The MIME type of the content of the image. Only the images in below listed MIME types are supported. - image/jpeg - image/gif - image/png - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon + * TFT feature importance values. 
Each pair for {context/horizon/attribute\} should have the same shape since the weight corresponds to the column names. */ - mimeType?: string | null; + contextWeights?: number[] | null; + horizonColumns?: string[] | null; + horizonWeights?: number[] | null; } /** - * Prediction input format for Image Segmentation. + * Prediction output format for Time Series Forecasting. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceImageSegmentationPredictionInstance { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTimeSeriesForecastingPredictionResult { /** - * The image bytes to make the predictions on. + * Quantile predictions, in 1-1 correspondence with quantile_values. */ - content?: string | null; + quantilePredictions?: number[] | null; /** - * The MIME type of the content of the image. Only the images in below listed MIME types are supported. - image/jpeg - image/png + * Quantile values. */ - mimeType?: string | null; - } - /** - * Prediction input format for Text Classification. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceTextClassificationPredictionInstance { + quantileValues?: number[] | null; /** - * The text snippet to make the predictions on. + * Only use these if TFt is enabled. */ - content?: string | null; + tftFeatureImportance?: Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTftFeatureImportance; /** - * The MIME type of the text snippet. The supported MIME types are listed below. - text/plain + * The regression value. */ - mimeType?: string | null; + value?: number | null; } /** - * Prediction input format for Text Extraction. + * Prediction output format for Video Action Recognition. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceTextExtractionPredictionInstance { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoActionRecognitionPredictionResult { /** - * The text snippet to make the predictions on. + * The Model's confidence in correction of this prediction, higher value means higher confidence. */ - content?: string | null; + confidence?: number | null; /** - * This field is only used for batch prediction. If a key is provided, the batch prediction result will by mapped to this key. If omitted, then the batch prediction result will contain the entire input instance. Vertex AI will not check if keys in the request are duplicates, so it is up to the caller to ensure the keys are unique. + * The display name of the AnnotationSpec that had been identified. */ - key?: string | null; + displayName?: string | null; /** - * The MIME type of the text snippet. The supported MIME types are listed below. - text/plain + * The resource ID of the AnnotationSpec that had been identified. */ - mimeType?: string | null; - } - /** - * Prediction input format for Text Sentiment. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceTextSentimentPredictionInstance { + id?: string | null; /** - * The text snippet to make the predictions on. + * The end, exclusive, of the video's time segment in which the AnnotationSpec has been identified. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. */ - content?: string | null; + timeSegmentEnd?: string | null; /** - * The MIME type of the text snippet. The supported MIME types are listed below. - text/plain + * The beginning, inclusive, of the video's time segment in which the AnnotationSpec has been identified. 
Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. */ - mimeType?: string | null; + timeSegmentStart?: string | null; } /** - * Prediction input format for Video Action Recognition. + * Prediction output format for Video Classification. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceVideoActionRecognitionPredictionInstance { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoClassificationPredictionResult { /** - * The Google Cloud Storage location of the video on which to perform the prediction. + * The Model's confidence in correction of this prediction, higher value means higher confidence. */ - content?: string | null; + confidence?: number | null; /** - * The MIME type of the content of the video. Only the following are supported: video/mp4 video/avi video/quicktime + * The display name of the AnnotationSpec that had been identified. */ - mimeType?: string | null; + displayName?: string | null; /** - * The end, exclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision, and "inf" or "Infinity" is allowed, which means the end of the video. + * The resource ID of the AnnotationSpec that had been identified. + */ + id?: string | null; + /** + * The end, exclusive, of the video's time segment in which the AnnotationSpec has been identified. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. Note that for 'segment-classification' prediction type, this equals the original 'timeSegmentEnd' from the input instance, for other types it is the end of a shot or a 1 second interval respectively. 
*/ timeSegmentEnd?: string | null; /** - * The beginning, inclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision. + * The beginning, inclusive, of the video's time segment in which the AnnotationSpec has been identified. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. Note that for 'segment-classification' prediction type, this equals the original 'timeSegmentStart' from the input instance, for other types it is the start of a shot or a 1 second interval respectively. */ timeSegmentStart?: string | null; + /** + * The type of the prediction. The requested types can be configured via parameters. This will be one of - segment-classification - shot-classification - one-sec-interval-classification + */ + type?: string | null; } /** - * Prediction input format for Video Classification. + * Prediction output format for Video Object Tracking. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceVideoClassificationPredictionInstance { - /** - * The Google Cloud Storage location of the video on which to perform the prediction. - */ - content?: string | null; - /** - * The MIME type of the content of the video. Only the following are supported: video/mp4 video/avi video/quicktime - */ - mimeType?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoObjectTrackingPredictionResult { /** - * The end, exclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision, and "inf" or "Infinity" is allowed, which means the end of the video. 
+ * The Model's confidence in correction of this prediction, higher value means higher confidence. */ - timeSegmentEnd?: string | null; + confidence?: number | null; /** - * The beginning, inclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision. + * The display name of the AnnotationSpec that had been identified. */ - timeSegmentStart?: string | null; - } - /** - * Prediction input format for Video Object Tracking. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictInstanceVideoObjectTrackingPredictionInstance { + displayName?: string | null; /** - * The Google Cloud Storage location of the video on which to perform the prediction. + * All of the frames of the video in which a single object instance has been detected. The bounding boxes in the frames identify the same object. */ - content?: string | null; + frames?: Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame[]; /** - * The MIME type of the content of the video. Only the following are supported: video/mp4 video/avi video/quicktime + * The resource ID of the AnnotationSpec that had been identified. */ - mimeType?: string | null; + id?: string | null; /** - * The end, exclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision, and "inf" or "Infinity" is allowed, which means the end of the video. + * The end, inclusive, of the video's time segment in which the object instance has been detected. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. 
*/ timeSegmentEnd?: string | null; /** - * The beginning, inclusive, of the video's time segment on which to perform the prediction. Expressed as a number of seconds as measured from the start of the video, with "s" appended at the end. Fractions are allowed, up to a microsecond precision. + * The beginning, inclusive, of the video's time segment in which the object instance has been detected. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. */ timeSegmentStart?: string | null; } /** - * Represents a line of JSONL in the batch prediction output file. + * The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box, i.e. the rectangle over the video frame pinpointing the found AnnotationSpec. The coordinates are relative to the frame size, and the point 0,0 is in the top left of the frame. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictionResult { - /** - * The error result. Do not set prediction if this is set. - */ - error?: Schema$GoogleCloudAiplatformV1beta1SchemaPredictionResultError; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame { /** - * User's input instance. Struct is used here instead of Any so that JsonFormat does not append an extra "@type" field when we convert the proto to JSON. + * A time (frame) of a video in which the object has been detected. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. */ - instance?: {[key: string]: any} | null; + timeOffset?: string | null; /** - * Optional user-provided key from the input instance. + * The rightmost coordinate of the bounding box. */ - key?: string | null; + xMax?: number | null; /** - * The prediction result. 
Value is used here instead of Any so that JsonFormat does not append an extra "@type" field when we convert the proto to JSON and so we can represent array of objects. Do not set error if this is set. + * The leftmost coordinate of the bounding box. */ - prediction?: any | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictionResultError { + xMin?: number | null; /** - * Error message with additional details. + * The bottommost coordinate of the bounding box. */ - message?: string | null; + yMax?: number | null; /** - * Error status. This will be serialized into the enum name e.g. "NOT_FOUND". + * The topmost coordinate of the bounding box. */ - status?: string | null; + yMin?: number | null; } /** - * Prediction model parameters for Image Classification. + * The metadata of Datasets that contain tables data. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsImageClassificationPredictionParams { - /** - * The Model only returns predictions with at least this confidence score. Default value is 0.0 - */ - confidenceThreshold?: number | null; - /** - * The Model only returns up to that many top, by confidence score, predictions per instance. If this number is very high, the Model may return fewer predictions. Default value is 10. - */ - maxPredictions?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadata { + inputConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataInputConfig; } - /** - * Prediction model parameters for Image Object Detection. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsImageObjectDetectionPredictionParams { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataBigQuerySource { /** - * The Model only returns predictions with at least this confidence score. Default value is 0.0 + * The URI of a BigQuery table. e.g. 
bq://projectId.bqDatasetId.bqTableId */ - confidenceThreshold?: number | null; + uri?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataGcsSource { /** - * The Model only returns up to that many top, by confidence score, predictions per instance. Note that number of returned predictions is also limited by metadata's predictionsLimit. Default value is 10. + * Cloud Storage URI of one or more files. Only CSV files are supported. The first line of the CSV file is used as the header. If there are multiple files, the header is the first line of the lexicographically first file, the other files must either contain the exact same header or omit the header. */ - maxPredictions?: number | null; + uri?: string[] | null; } /** - * Prediction model parameters for Image Segmentation. + * The tables Dataset's data source. The Dataset doesn't store the data directly, but only pointer(s) to its data. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsImageSegmentationPredictionParams { - /** - * When the model predicts category of pixels of the image, it will only provide predictions for pixels that it is at least this much confident about. All other pixels will be classified as background. Default value is 0.5. - */ - confidenceThreshold?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataInputConfig { + bigquerySource?: Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataBigQuerySource; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataGcsSource; } /** - * Prediction model parameters for Video Action Recognition. + * Annotation details specific to text classification. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsVideoActionRecognitionPredictionParams { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextClassificationAnnotation { /** - * The Model only returns predictions with at least this confidence score. Default value is 0.0 + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - confidenceThreshold?: number | null; + annotationSpecId?: string | null; /** - * The model only returns up to that many top, by confidence score, predictions per frame of the video. If this number is very high, the Model may return fewer predictions per frame. Default value is 50. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - maxPredictions?: number | null; + displayName?: string | null; } /** - * Prediction model parameters for Video Classification. + * Payload of Text DataItem. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsVideoClassificationPredictionParams { - /** - * The Model only returns predictions with at least this confidence score. Default value is 0.0 - */ - confidenceThreshold?: number | null; - /** - * The Model only returns up to that many top, by confidence score, predictions per instance. If this number is very high, the Model may return fewer predictions. Default value is 10,000. - */ - maxPredictions?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextDataItem { /** - * Set to true to request classification for a video at one-second intervals. Vertex AI returns labels and their confidence scores for each second of the entire time segment of the video that user specified in the input WARNING: Model evaluation is not done for this classification type, the quality of it depends on the training data, but there are no metrics provided to describe that quality. Default value is false + * Output only. Google Cloud Storage URI points to the original text in user's bucket. 
The text file is up to 10MB in size. */ - oneSecIntervalClassification?: boolean | null; + gcsUri?: string | null; + } + /** + * The metadata of Datasets that contain Text DataItems. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextDatasetMetadata { /** - * Set to true to request segment-level classification. Vertex AI returns labels and their confidence scores for the entire time segment of the video that user specified in the input instance. Default value is true + * Points to a YAML file stored on Google Cloud Storage describing payload of the Text DataItems that belong to this Dataset. */ - segmentClassification?: boolean | null; + dataItemSchemaUri?: string | null; /** - * Set to true to request shot-level classification. Vertex AI determines the boundaries for each camera shot in the entire time segment of the video that user specified in the input instance. Vertex AI then returns labels and their confidence scores for each detected shot, along with the start and end time of the shot. WARNING: Model evaluation is not done for this classification type, the quality of it depends on the training data, but there are no metrics provided to describe that quality. Default value is false + * Google Cloud Storage Bucket name that contains the blob data of this Dataset. */ - shotClassification?: boolean | null; + gcsBucket?: string | null; } /** - * Prediction model parameters for Video Object Tracking. + * Annotation details specific to text extraction. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictParamsVideoObjectTrackingPredictionParams { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextExtractionAnnotation { /** - * The Model only returns predictions with at least this confidence score. Default value is 0.0 + * The resource Id of the AnnotationSpec that this Annotation pertains to. 
*/ - confidenceThreshold?: number | null; + annotationSpecId?: string | null; /** - * The model only returns up to that many top, by confidence score, predictions per frame of the video. If this number is very high, the Model may return fewer predictions per frame. Default value is 50. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - maxPredictions?: number | null; + displayName?: string | null; /** - * Only bounding boxes with shortest edge at least that long as a relative value of video frame size are returned. Default value is 0.0. + * The segment of the text content. */ - minBoundingBoxSize?: number | null; + textSegment?: Schema$GoogleCloudAiplatformV1beta1SchemaTextSegment; } /** - * Prediction output format for Image and Text Classification. + * The text segment inside of DataItem. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionClassificationPredictionResult { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextSegment { /** - * The Model's confidences in correctness of the predicted IDs, higher value means higher confidence. Order matches the Ids. + * The text content in the segment for output only. */ - confidences?: number[] | null; + content?: string | null; /** - * The display names of the AnnotationSpecs that had been identified, order matches the IDs. + * Zero-based character index of the first character past the end of the text segment (counting character from the beginning of the text). The character at the end_offset is NOT included in the text segment. */ - displayNames?: string[] | null; + endOffset?: string | null; /** - * The resource IDs of the AnnotationSpecs that had been identified. + * Zero-based character index of the first character of the text segment (counting characters from the beginning of the text). */ - ids?: string[] | null; + startOffset?: string | null; } /** - * Prediction output format for Image Object Detection. 
+ * Annotation details specific to text sentiment. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionImageObjectDetectionPredictionResult { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextSentimentAnnotation { /** - * Bounding boxes, i.e. the rectangles over the image, that pinpoint the found AnnotationSpecs. Given in order that matches the IDs. Each bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and `yMax`, which represent the extremal coordinates of the box. They are relative to the image size, and the point 0,0 is in the top left of the image. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - bboxes?: any[][] | null; + annotationSpecId?: string | null; /** - * The Model's confidences in correctness of the predicted IDs, higher value means higher confidence. Order matches the Ids. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - confidences?: number[] | null; + displayName?: string | null; /** - * The display names of the AnnotationSpecs that had been identified, order matches the IDs. + * The sentiment score for text. */ - displayNames?: string[] | null; + sentiment?: number | null; /** - * The resource IDs of the AnnotationSpecs that had been identified, ordered by the confidence score descendingly. + * The sentiment max score for text. */ - ids?: string[] | null; + sentimentMax?: number | null; } /** - * Prediction output format for Image Segmentation. + * The metadata of SavedQuery contains TextSentiment Annotations. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionImageSegmentationPredictionResult { - /** - * A PNG image where each pixel in the mask represents the category in which the pixel in the original image was predicted to belong to. The size of this image will be the same as the original image. The mapping between the AnntoationSpec and the color can be found in model's metadata. 
The model will choose the most likely category and if none of the categories reach the confidence threshold, the pixel will be marked as background. - */ - categoryMask?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextSentimentSavedQueryMetadata { /** - * A one channel image which is encoded as an 8bit lossless PNG. The size of the image will be the same as the original image. For a specific pixel, darker color means less confidence in correctness of the cateogry in the categoryMask for the corresponding pixel. Black means no confidence and white means complete confidence. + * The maximum sentiment of sentiment Anntoation in this SavedQuery. */ - confidenceMask?: string | null; + sentimentMax?: number | null; } /** - * Prediction output format for Tabular Classification. + * A time period inside of a DataItem that has a time dimension (e.g. video). */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTabularClassificationPredictionResult { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSegment { /** - * The name of the classes being classified, contains all possible values of the target column. + * End of the time segment (exclusive), represented as the duration since the start of the DataItem. */ - classes?: string[] | null; + endTimeOffset?: string | null; /** - * The model's confidence in each class being correct, higher value means higher confidence. The N-th score corresponds to the N-th class in classes. + * Start of the time segment (inclusive), represented as the duration since the start of the DataItem. */ - scores?: number[] | null; + startTimeOffset?: string | null; } /** - * Prediction output format for Tabular Regression. + * The metadata of Datasets that contain time series data. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTabularRegressionPredictionResult { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadata { + inputConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataInputConfig; /** - * The lower bound of the prediction interval. + * The column name of the time column that identifies time order in the time series. */ - lowerBound?: number | null; + timeColumn?: string | null; /** - * Quantile predictions, in 1-1 correspondence with quantile_values. + * The column name of the time series identifier column that identifies the time series. */ - quantilePredictions?: number[] | null; - /** - * Quantile values. - */ - quantileValues?: number[] | null; + timeSeriesIdentifierColumn?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataBigQuerySource { /** - * The upper bound of the prediction interval. + * The URI of a BigQuery table. */ - upperBound?: number | null; + uri?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataGcsSource { /** - * The regression value. + * Cloud Storage URI of one or more files. Only CSV files are supported. The first line of the CSV file is used as the header. If there are multiple files, the header is the first line of the lexicographically first file, the other files must either contain the exact same header or omit the header. */ - value?: number | null; + uri?: string[] | null; } /** - * Prediction output format for Text Extraction. + * The time series Dataset's data source. The Dataset doesn't store the data directly, but only pointer(s) to its data. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTextExtractionPredictionResult { - /** - * The Model's confidences in correctness of the predicted IDs, higher value means higher confidence. Order matches the Ids. 
- */ - confidences?: number[] | null; - /** - * The display names of the AnnotationSpecs that had been identified, order matches the IDs. - */ - displayNames?: string[] | null; - /** - * The resource IDs of the AnnotationSpecs that had been identified, ordered by the confidence score descendingly. - */ - ids?: string[] | null; - /** - * The end offsets, inclusive, of the text segment in which the AnnotationSpec has been identified. Expressed as a zero-based number of characters as measured from the start of the text snippet. - */ - textSegmentEndOffsets?: string[] | null; - /** - * The start offsets, inclusive, of the text segment in which the AnnotationSpec has been identified. Expressed as a zero-based number of characters as measured from the start of the text snippet. - */ - textSegmentStartOffsets?: string[] | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataInputConfig { + bigquerySource?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataBigQuerySource; + gcsSource?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataGcsSource; } /** - * Prediction output format for Text Sentiment + * A TrainingJob that trains and uploads an AutoML Forecasting Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTextSentimentPredictionResult { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecasting { /** - * The integer sentiment labels between 0 (inclusive) and sentimentMax label (inclusive), while 0 maps to the least positive sentiment and sentimentMax maps to the most positive one. The higher the score is, the more positive the sentiment in the text snippet is. Note: sentimentMax is an integer value between 1 (inclusive) and 10 (inclusive). + * The input parameters of this TrainingJob. 
*/ - sentiment?: number | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTftFeatureImportance { - attributeColumns?: string[] | null; - attributeWeights?: number[] | null; - contextColumns?: string[] | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputs; /** - * TFT feature importance values. Each pair for {context/horizon/attribute\} should have the same shape since the weight corresponds to the column names. + * The metadata information. */ - contextWeights?: number[] | null; - horizonColumns?: string[] | null; - horizonWeights?: number[] | null; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingMetadata; } - /** - * Prediction output format for Time Series Forecasting. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTimeSeriesForecastingPredictionResult { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputs { /** - * Quantile predictions, in 1-1 correspondence with quantile_values. + * Additional experiment flags for the time series forcasting training. */ - quantilePredictions?: number[] | null; + additionalExperiments?: string[] | null; /** - * Quantile values. + * Names of columns that are available and provided when a forecast is requested. These columns contain information for the given entity (identified by the time_series_identifier_column column) that is known at forecast. For example, predicted weather for a specific day. */ - quantileValues?: number[] | null; + availableAtForecastColumns?: string[] | null; /** - * Only use these if TFt is enabled. + * The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the `data_granularity` field. 
*/ - tftFeatureImportance?: Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionTftFeatureImportance; + contextWindow?: string | null; /** - * The regression value. + * Expected difference in time granularity between rows in the data. */ - value?: number | null; - } - /** - * Prediction output format for Video Action Recognition. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoActionRecognitionPredictionResult { + dataGranularity?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity; /** - * The Model's confidence in correction of this prediction, higher value means higher confidence. + * If probabilistic inference is enabled, the model will fit a distribution that captures the uncertainty of a prediction. At inference time, the predictive distribution is used to make a point prediction that minimizes the optimization objective. For example, the mean of a predictive distribution is the point prediction that minimizes RMSE loss. If quantiles are specified, then the quantiles of the distribution are also returned. The optimization objective cannot be minimize-quantile-loss. */ - confidence?: number | null; + enableProbabilisticInference?: boolean | null; /** - * The display name of the AnnotationSpec that had been identified. + * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. */ - displayName?: string | null; + exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** - * The resource ID of the AnnotationSpec that had been identified. + * The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the `data_granularity` field. 
*/ - id?: string | null; + forecastHorizon?: string | null; /** - * The end, exclusive, of the video's time segment in which the AnnotationSpec has been identified. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. + * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. */ - timeSegmentEnd?: string | null; + hierarchyConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig; /** - * The beginning, inclusive, of the video's time segment in which the AnnotationSpec has been identified. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. + * The geographical region based on which the holiday effect is applied in modeling by adding holiday categorical array feature that include all holidays matching the date. This option only allowed when data_granularity is day. By default, holiday effect modeling is disabled. To turn it on, specify the holiday region using this option. */ - timeSegmentStart?: string | null; - } - /** - * Prediction output format for Video Classification. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoClassificationPredictionResult { + holidayRegions?: string[] | null; /** - * The Model's confidence in correction of this prediction, higher value means higher confidence. + * Objective function the model is optimizing towards. The training process creates a model that optimizes the value of the objective function over the validation set. The supported optimization objectives: * "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). 
* "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - Minimize the mean absolute percentage error. */ - confidence?: number | null; + optimizationObjective?: string | null; /** - * The display name of the AnnotationSpec that had been identified. + * Quantiles to use for minimize-quantile-loss `optimization_objective`, or for probabilistic inference. Up to 5 quantiles are allowed of values between 0 and 1, exclusive. Required if the value of optimization_objective is minimize-quantile-loss. Represents the percent quantiles to use for that objective. Quantiles must be unique. */ - displayName?: string | null; + quantiles?: number[] | null; /** - * The resource ID of the AnnotationSpec that had been identified. + * The name of the column that the Model is to predict values for. This column must be unavailable at forecast. */ - id?: string | null; + targetColumn?: string | null; /** - * The end, exclusive, of the video's time segment in which the AnnotationSpec has been identified. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. Note that for 'segment-classification' prediction type, this equals the original 'timeSegmentEnd' from the input instance, for other types it is the end of a shot or a 1 second interval respectively. + * The name of the column that identifies time order in the time series. This column must be available at forecast. */ - timeSegmentEnd?: string | null; + timeColumn?: string | null; /** - * The beginning, inclusive, of the video's time segment in which the AnnotationSpec has been identified. 
Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. Note that for 'segment-classification' prediction type, this equals the original 'timeSegmentStart' from the input instance, for other types it is the start of a shot or a 1 second interval respectively. + * Column names that should be used as attribute columns. The value of these columns does not vary as a function of time. For example, store ID or item color. */ - timeSegmentStart?: string | null; + timeSeriesAttributeColumns?: string[] | null; /** - * The type of the prediction. The requested types can be configured via parameters. This will be one of - segment-classification - shot-classification - one-sec-interval-classification + * The name of the column that identifies the time series. */ - type?: string | null; - } - /** - * Prediction output format for Video Object Tracking. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoObjectTrackingPredictionResult { + timeSeriesIdentifierColumn?: string | null; /** - * The Model's confidence in correction of this prediction, higher value means higher confidence. + * Required. The train budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a model for the given dataset, the training won't be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. 
*/ - confidence?: number | null; + trainBudgetMilliNodeHours?: string | null; /** - * The display name of the AnnotationSpec that had been identified. + * Each transformation will apply transform function to given input column. And the result will be used for training. When creating transformation for BigQuery Struct column, the column should be flattened using "." as the delimiter. */ - displayName?: string | null; + transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformation[]; /** - * All of the frames of the video in which a single object instance has been detected. The bounding boxes in the frames identify the same object. + * Names of columns that are unavailable when a forecast is requested. This column contains information for the given entity (identified by the time_series_identifier_column) that is unknown before the forecast For example, actual weather on a given day. */ - frames?: Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame[]; + unavailableAtForecastColumns?: string[] | null; /** - * The resource ID of the AnnotationSpec that had been identified. + * Validation options for the data validation component. The available options are: * "fail-pipeline" - default, will validate against the validation and fail the pipeline if it fails. * "ignore-validation" - ignore the results of the validation and continue */ - id?: string | null; + validationOptions?: string | null; /** - * The end, inclusive, of the video's time segment in which the object instance has been detected. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. + * Column name that should be used as the weight column. Higher values in this column give more importance to the row during model training. 
The column must have numeric values between 0 and 10000 inclusively; 0 means the row is ignored for training. If weight column field is not set, then all rows are assumed to have equal weight of 1. */ - timeSegmentEnd?: string | null; + weightColumn?: string | null; /** - * The beginning, inclusive, of the video's time segment in which the object instance has been detected. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. + * Config containing strategy for generating sliding windows. */ - timeSegmentStart?: string | null; + windowConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionWindowConfig; } /** - * The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box, i.e. the rectangle over the video frame pinpointing the found AnnotationSpec. The coordinates are relative to the frame size, and the point 0,0 is in the top left of the frame. + * A duration of time expressed in time granularity units. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame { - /** - * A time (frame) of a video in which the object has been detected. Expressed as a number of seconds as measured from the start of the video, with fractions up to a microsecond precision, and with "s" appended at the end. - */ - timeOffset?: string | null; - /** - * The rightmost coordinate of the bounding box. - */ - xMax?: number | null; - /** - * The leftmost coordinate of the bounding box. - */ - xMin?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity { /** - * The bottommost coordinate of the bounding box. + * The number of granularity_units between data points in the training data. If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all other values of `granularity_unit`, must be 1. 
*/ - yMax?: number | null; + quantity?: string | null; /** - * The topmost coordinate of the bounding box. + * The time granularity unit of this time period. The supported units are: * "minute" * "hour" * "day" * "week" * "month" * "year" */ - yMin?: number | null; + unit?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformation { + auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationAutoTransformation; + categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationCategoricalTransformation; + numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationNumericTransformation; + text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTextTransformation; + timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTimestampTransformation; } /** - * The metadata of Datasets that contain tables data. + * Training pipeline will infer the proper transformation based on the statistic of dataset. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadata { - inputConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataInputConfig; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataBigQuerySource { - /** - * The URI of a BigQuery table. e.g. bq://projectId.bqDatasetId.bqTableId - */ - uri?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationAutoTransformation { + columnName?: string | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataGcsSource { - /** - * Cloud Storage URI of one or more files. Only CSV files are supported. 
The first line of the CSV file is used as the header. If there are multiple files, the header is the first line of the lexicographically first file, the other files must either contain the exact same header or omit the header. - */ - uri?: string[] | null; + /** + * Training pipeline will perform following transformation functions. * The categorical string as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. * Categories that appear less than 5 times in the training dataset are treated as the "unknown" category. The "unknown" category gets its own special lookup index and resulting embedding. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationCategoricalTransformation { + columnName?: string | null; } /** - * The tables Dataset's data source. The Dataset doesn't store the data directly, but only pointer(s) to its data. + * Training pipeline will perform following transformation functions. * The value converted to float32. * The z_score of the value. * log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * A boolean value that indicates whether the value is valid. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataInputConfig { - bigquerySource?: Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataBigQuerySource; - gcsSource?: Schema$GoogleCloudAiplatformV1beta1SchemaTablesDatasetMetadataGcsSource; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationNumericTransformation { + columnName?: string | null; } /** - * Annotation details specific to text classification. + * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextClassificationAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. - */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTextTransformation { + columnName?: string | null; } /** - * Payload of Text DataItem. + * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextDataItem { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTimestampTransformation { + columnName?: string | null; /** - * Output only. 
Google Cloud Storage URI points to the original text in user's bucket. The text file is up to 10MB in size. + * The format in which that time field is expressed. The time_format must either be one of: * `unix-seconds` * `unix-milliseconds` * `unix-microseconds` * `unix-nanoseconds` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in `strftime` syntax. If time_format is not set, then the default format is RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ - gcsUri?: string | null; + timeFormat?: string | null; } /** - * The metadata of Datasets that contain Text DataItems. + * Model metadata specific to AutoML Forecasting. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextDatasetMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingMetadata { /** - * Points to a YAML file stored on Google Cloud Storage describing payload of the Text DataItems that belong to this Dataset. + * BigQuery destination uri for exported evaluated examples. */ - dataItemSchemaUri?: string | null; + evaluatedDataItemsBigqueryUri?: string | null; /** - * Google Cloud Storage Bucket name that contains the blob data of this Dataset. + * Output only. The actual training cost of the model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. */ - gcsBucket?: string | null; + trainCostMilliNodeHours?: string | null; } /** - * Annotation details specific to text extraction. + * A TrainingJob that trains and uploads an AutoML Image Classification Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextExtractionAnnotation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassification { /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. 
+ * The input parameters of this TrainingJob. */ - annotationSpecId?: string | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs; /** - * The display name of the AnnotationSpec that this Annotation pertains to. + * The metadata information. */ - displayName?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs { /** - * The segment of the text content. + * The ID of the `base` model. If it is specified, the new model will be trained based on the `base` model. Otherwise, the new model will be trained from scratch. The `base` model must be in the same Project and Location as the new Model to train, and have the same modelType. */ - textSegment?: Schema$GoogleCloudAiplatformV1beta1SchemaTextSegment; - } - /** - * The text segment inside of DataItem. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextSegment { + baseModelId?: string | null; /** - * The text content in the segment for output only. + * The training budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The actual metadata.costMilliNodeHours will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using the full budget and the metadata.successfulStopReason will be `model-converged`. Note, node_hour = actual_hour * number_of_nodes_involved. For modelType `cloud`(default), the budget must be between 8,000 and 800,000 milli node hours, inclusive. The default value is 192,000 which represents one day in wall time, considering 8 nodes are used. For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, `mobile-tf-high-accuracy-1`, the training budget must be between 1,000 and 100,000 milli node hours, inclusive. 
The default value is 24,000 which represents one day in wall time on a single node that is used. */ - content?: string | null; + budgetMilliNodeHours?: string | null; /** - * Zero-based character index of the first character past the end of the text segment (counting character from the beginning of the text). The character at the end_offset is NOT included in the text segment. + * Use the entire training budget. This disables the early stopping feature. When false the early stopping feature is enabled, which means that AutoML Image Classification might stop training before the entire training budget has been used. */ - endOffset?: string | null; + disableEarlyStopping?: boolean | null; + modelType?: string | null; /** - * Zero-based character index of the first character of the text segment (counting characters from the beginning of the text). + * If false, a single-label (multi-class) Model will be trained (i.e. assuming that for each image just up to one annotation may be applicable). If true, a multi-label Model will be trained (i.e. assuming that for each image multiple annotations may be applicable). */ - startOffset?: string | null; - } - /** - * Annotation details specific to text sentiment. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextSentimentAnnotation { + multiLabel?: boolean | null; /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. + * Trainer type for Vision TrainRequest. */ - annotationSpecId?: string | null; + tunableParameter?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter; /** - * The display name of the AnnotationSpec that this Annotation pertains to. + * The ID of `base` model for upTraining. If it is specified, the new model will be upTrained based on the `base` model for upTraining. Otherwise, the new model will be trained from scratch. 
The `base` model for upTraining must be in the same Project and Location as the new Model to train, and have the same modelType. */ - displayName?: string | null; + uptrainBaseModelId?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata { /** - * The sentiment score for text. + * The actual training cost of creating this model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed inputs.budgetMilliNodeHours. */ - sentiment?: number | null; + costMilliNodeHours?: string | null; /** - * The sentiment max score for text. + * For successful job completions, this is the reason why the job has finished. */ - sentimentMax?: number | null; + successfulStopReason?: string | null; } /** - * The metadata of SavedQuery contains TextSentiment Annotations. + * A TrainingJob that trains and uploads an AutoML Image Object Detection Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTextSentimentSavedQueryMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetection { /** - * The maximum sentiment of sentiment Anntoation in this SavedQuery. + * The input parameters of this TrainingJob. */ - sentimentMax?: number | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs; + /** + * The metadata information + */ + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata; } - /** - * A time period inside of a DataItem that has a time dimension (e.g. video). - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSegment { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs { /** - * End of the time segment (exclusive), represented as the duration since the start of the DataItem. 
+ * The training budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The actual metadata.costMilliNodeHours will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using the full budget and the metadata.successfulStopReason will be `model-converged`. Note, node_hour = actual_hour * number_of_nodes_involved. For modelType `cloud`(default), the budget must be between 20,000 and 900,000 milli node hours, inclusive. The default value is 216,000 which represents one day in wall time, considering 9 nodes are used. For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, `mobile-tf-high-accuracy-1` the training budget must be between 1,000 and 100,000 milli node hours, inclusive. The default value is 24,000 which represents one day in wall time on a single node that is used. */ - endTimeOffset?: string | null; + budgetMilliNodeHours?: string | null; /** - * Start of the time segment (inclusive), represented as the duration since the start of the DataItem. + * Use the entire training budget. This disables the early stopping feature. When false the early stopping feature is enabled, which means that AutoML Image Object Detection might stop training before the entire training budget has been used. */ - startTimeOffset?: string | null; - } - /** - * The metadata of Datasets that contain time series data. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadata { - inputConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataInputConfig; + disableEarlyStopping?: boolean | null; + modelType?: string | null; /** - * The column name of the time column that identifies time order in the time series. + * Trainer type for Vision TrainRequest. 
*/ - timeColumn?: string | null; + tunableParameter?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter; /** - * The column name of the time series identifier column that identifies the time series. + * The ID of `base` model for upTraining. If it is specified, the new model will be upTrained based on the `base` model for upTraining. Otherwise, the new model will be trained from scratch. The `base` model for upTraining must be in the same Project and Location as the new Model to train, and have the same modelType. */ - timeSeriesIdentifierColumn?: string | null; + uptrainBaseModelId?: string | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataBigQuerySource { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata { /** - * The URI of a BigQuery table. + * The actual training cost of creating this model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed inputs.budgetMilliNodeHours. */ - uri?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataGcsSource { + costMilliNodeHours?: string | null; /** - * Cloud Storage URI of one or more files. Only CSV files are supported. The first line of the CSV file is used as the header. If there are multiple files, the header is the first line of the lexicographically first file, the other files must either contain the exact same header or omit the header. + * For successful job completions, this is the reason why the job has finished. */ - uri?: string[] | null; - } - /** - * The time series Dataset's data source. The Dataset doesn't store the data directly, but only pointer(s) to its data. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataInputConfig { - bigquerySource?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataBigQuerySource; - gcsSource?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSeriesDatasetMetadataGcsSource; + successfulStopReason?: string | null; } /** - * A TrainingJob that trains and uploads an AutoML Forecasting Model. + * A TrainingJob that trains and uploads an AutoML Image Segmentation Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecasting { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentation { /** * The input parameters of this TrainingJob. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputs; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs; /** * The metadata information. */ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingMetadata; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputs { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs { /** - * Additional experiment flags for the time series forcasting training. + * The ID of the `base` model. If it is specified, the new model will be trained based on the `base` model. Otherwise, the new model will be trained from scratch. The `base` model must be in the same Project and Location as the new Model to train, and have the same modelType. */ - additionalExperiments?: string[] | null; + baseModelId?: string | null; /** - * Names of columns that are available and provided when a forecast is requested. 
These columns contain information for the given entity (identified by the time_series_identifier_column column) that is known at forecast. For example, predicted weather for a specific day. + * The training budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The actual metadata.costMilliNodeHours will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using the full budget and the metadata.successfulStopReason will be `model-converged`. Note, node_hour = actual_hour * number_of_nodes_involved. Or actual_wall_clock_hours = train_budget_milli_node_hours / (number_of_nodes_involved * 1000) For modelType `cloud-high-accuracy-1`(default), the budget must be between 20,000 and 2,000,000 milli node hours, inclusive. The default value is 192,000 which represents one day in wall time (1000 milli * 24 hours * 8 nodes). */ - availableAtForecastColumns?: string[] | null; + budgetMilliNodeHours?: string | null; + modelType?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata { /** - * The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the `data_granularity` field. + * The actual training cost of creating this model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed inputs.budgetMilliNodeHours. */ - contextWindow?: string | null; + costMilliNodeHours?: string | null; /** - * Expected difference in time granularity between rows in the data. + * For successful job completions, this is the reason why the job has finished. 
*/ - dataGranularity?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity; + successfulStopReason?: string | null; + } + /** + * A wrapper class which contains the tunable parameters in an AutoML Image training job. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter { /** - * If probabilistic inference is enabled, the model will fit a distribution that captures the uncertainty of a prediction. At inference time, the predictive distribution is used to make a point prediction that minimizes the optimization objective. For example, the mean of a predictive distribution is the point prediction that minimizes RMSE loss. If quantiles are specified, then the quantiles of the distribution are also returned. The optimization objective cannot be minimize-quantile-loss. + * Optional. An unique name of pretrained model checkpoint provided in model garden, it will be mapped to a GCS location internally. */ - enableProbabilisticInference?: boolean | null; + checkpointName?: string | null; /** - * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. + * Customizable dataset settings, used in the `model_garden_trainer`. */ - exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; + datasetConfig?: {[key: string]: string} | null; /** - * The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the `data_granularity` field. + * Optioinal. StudySpec of hyperparameter tuning job. Required for `model_garden_trainer`. 
*/ - forecastHorizon?: string | null; + studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; /** - * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. + * Customizable trainer settings, used in the `model_garden_trainer`. */ - hierarchyConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig; + trainerConfig?: {[key: string]: string} | null; + trainerType?: string | null; + } + /** + * A TrainingJob that trains and uploads an AutoML Tables Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTables { /** - * The geographical region based on which the holiday effect is applied in modeling by adding holiday categorical array feature that include all holidays matching the date. This option only allowed when data_granularity is day. By default, holiday effect modeling is disabled. To turn it on, specify the holiday region using this option. + * The input parameters of this TrainingJob. */ - holidayRegions?: string[] | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputs; /** - * Objective function the model is optimizing towards. The training process creates a model that optimizes the value of the objective function over the validation set. The supported optimization objectives: * "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - Minimize the mean absolute percentage error. + * The metadata information. 
*/ - optimizationObjective?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesMetadata; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputs { /** - * Quantiles to use for minimize-quantile-loss `optimization_objective`, or for probabilistic inference. Up to 5 quantiles are allowed of values between 0 and 1, exclusive. Required if the value of optimization_objective is minimize-quantile-loss. Represents the percent quantiles to use for that objective. Quantiles must be unique. + * Additional experiment flags for the Tables training pipeline. */ - quantiles?: number[] | null; + additionalExperiments?: string[] | null; /** - * The name of the column that the Model is to predict values for. This column must be unavailable at forecast. + * Use the entire training budget. This disables the early stopping feature. By default, the early stopping feature is enabled, which means that AutoML Tables might stop training before the entire training budget has been used. */ - targetColumn?: string | null; + disableEarlyStopping?: boolean | null; /** - * The name of the column that identifies time order in the time series. This column must be available at forecast. + * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. */ - timeColumn?: string | null; + exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** - * Column names that should be used as attribute columns. The value of these columns does not vary as a function of time. For example, store ID or item color. + * Objective function the model is optimizing towards. The training process creates a model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type. 
If the field is not set, a default objective function is used. classification (binary): "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss" - Minimize log loss. "maximize-au-prc" - Maximize the area under the precision-recall curve. "maximize-precision-at-recall" - Maximize precision for a specified recall value. "maximize-recall-at-precision" - Maximize recall for a specified precision value. classification (multi-class): "minimize-log-loss" (default) - Minimize log loss. regression: "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). */ - timeSeriesAttributeColumns?: string[] | null; + optimizationObjective?: string | null; /** - * The name of the column that identifies the time series. + * Required when optimization_objective is "maximize-recall-at-precision". Must be between 0 and 1, inclusive. */ - timeSeriesIdentifierColumn?: string | null; + optimizationObjectivePrecisionValue?: number | null; /** - * Required. The train budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a model for the given dataset, the training won't be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. + * Required when optimization_objective is "maximize-precision-at-recall". Must be between 0 and 1, inclusive. 
*/ - trainBudgetMilliNodeHours?: string | null; + optimizationObjectiveRecallValue?: number | null; /** - * Each transformation will apply transform function to given input column. And the result will be used for training. When creating transformation for BigQuery Struct column, the column should be flattened using "." as the delimiter. + * The type of prediction the Model is to produce. "classification" - Predict one out of multiple target values is picked for each row. "regression" - Predict a value based on its relation to other values. This type is available only to columns that contain semantically numeric values, i.e. integers or floating point number, even if stored as e.g. strings. */ - transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformation[]; + predictionType?: string | null; /** - * Names of columns that are unavailable when a forecast is requested. This column contains information for the given entity (identified by the time_series_identifier_column) that is unknown before the forecast For example, actual weather on a given day. + * The column name of the target column that the model is to predict. */ - unavailableAtForecastColumns?: string[] | null; + targetColumn?: string | null; /** - * Validation options for the data validation component. The available options are: * "fail-pipeline" - default, will validate against the validation and fail the pipeline if it fails. * "ignore-validation" - ignore the results of the validation and continue + * Required. The train budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. 
If the budget is set to a value known to be insufficient to train a model for the given dataset, the training won't be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. */ - validationOptions?: string | null; + trainBudgetMilliNodeHours?: string | null; /** - * Column name that should be used as the weight column. Higher values in this column give more importance to the row during model training. The column must have numeric values between 0 and 10000 inclusively; 0 means the row is ignored for training. If weight column field is not set, then all rows are assumed to have equal weight of 1. + * Each transformation will apply transform function to given input column. And the result will be used for training. When creating transformation for BigQuery Struct column, the column should be flattened using "." as the delimiter. */ - weightColumn?: string | null; + transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformation[]; /** - * Config containing strategy for generating sliding windows. + * Column name that should be used as the weight column. Higher values in this column give more importance to the row during model training. The column must have numeric values between 0 and 10000 inclusively; 0 means the row is ignored for training. If weight column field is not set, then all rows are assumed to have equal weight of 1. 
*/ - windowConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionWindowConfig; + weightColumnName?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformation { + auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationAutoTransformation; + categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalTransformation; + numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericTransformation; + repeatedCategorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalArrayTransformation; + repeatedNumeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericArrayTransformation; + repeatedText?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextArrayTransformation; + text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextTransformation; + timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTimestampTransformation; } /** - * A duration of time expressed in time granularity units. + * Training pipeline will infer the proper transformation based on the statistic of dataset. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity { - /** - * The number of granularity_units between data points in the training data. If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all other values of `granularity_unit`, must be 1. - */ - quantity?: string | null; - /** - * The time granularity unit of this time period. 
The supported units are: * "minute" * "hour" * "day" * "week" * "month" * "year" - */ - unit?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformation { - auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationAutoTransformation; - categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationCategoricalTransformation; - numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationNumericTransformation; - text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTextTransformation; - timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTimestampTransformation; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationAutoTransformation { + columnName?: string | null; } /** - * Training pipeline will infer the proper transformation based on the statistic of dataset. + * Treats the column as categorical array and performs following transformation functions. * For each element in the array, convert the category name to a dictionary lookup index and generate an embedding for each index. Combine the embedding of all elements into a single embedding using the mean. * Empty arrays treated as an embedding of zeroes. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationAutoTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalArrayTransformation { columnName?: string | null; } /** * Training pipeline will perform following transformation functions. 
* The categorical string as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. * Categories that appear less than 5 times in the training dataset are treated as the "unknown" category. The "unknown" category gets its own special lookup index and resulting embedding. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationCategoricalTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalTransformation { + columnName?: string | null; + } + /** + * Treats the column as numerical array and performs following transformation functions. * All transformations for Numerical types applied to the average of the all elements. * The average of empty arrays is treated as zero. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericArrayTransformation { columnName?: string | null; + /** + * If invalid values is allowed, the training pipeline will create a boolean feature that indicated whether the value is valid. Otherwise, the training pipeline will discard the input row from trainining data. + */ + invalidValuesAllowed?: boolean | null; } /** * Training pipeline will perform following transformation functions. * The value converted to float32. * The z_score of the value. * log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * A boolean value that indicates whether the value is valid. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationNumericTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericTransformation { columnName?: string | null; + /** + * If invalid values is allowed, the training pipeline will create a boolean feature that indicated whether the value is valid. Otherwise, the training pipeline will discard the input row from trainining data. + */ + invalidValuesAllowed?: boolean | null; } /** - * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. + * Treats the column as text array and performs following transformation functions. * Concatenate all text values in the array into a single text value using a space (" ") as a delimiter, and then treat the result as a single text value. Apply the transformations for Text columns. * Empty arrays treated as an empty text. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTextTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextArrayTransformation { columnName?: string | null; } /** - * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. + * Training pipeline will perform following transformation functions. 
* The text as is--no change to case, punctuation, spelling, tense, and so on. * Tokenize text to words. Convert each words to a dictionary lookup index and generate an embedding for each index. Combine the embedding of all elements into a single embedding using the mean. * Tokenization is based on unicode script boundaries. * Missing values get their own lookup index and resulting embedding. * Stop-words receive no special treatment and are not removed. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTimestampTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextTransformation { + columnName?: string | null; + } + /** + * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the * timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTimestampTransformation { columnName?: string | null; + /** + * If invalid values is allowed, the training pipeline will create a boolean feature that indicated whether the value is valid. Otherwise, the training pipeline will discard the input row from trainining data. + */ + invalidValuesAllowed?: boolean | null; /** * The format in which that time field is expressed. The time_format must either be one of: * `unix-seconds` * `unix-milliseconds` * `unix-microseconds` * `unix-nanoseconds` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in `strftime` syntax. 
If time_format is not set, then the default format is RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ timeFormat?: string | null; } /** - * Model metadata specific to AutoML Forecasting. + * Model metadata specific to AutoML Tables. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlForecastingMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesMetadata { /** * BigQuery destination uri for exported evaluated examples. */ @@ -13943,199 +13764,246 @@ export namespace aiplatform_v1beta1 { trainCostMilliNodeHours?: string | null; } /** - * A TrainingJob that trains and uploads an AutoML Image Classification Model. + * A TrainingJob that trains and uploads an AutoML Text Classification Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassification { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextClassification { /** * The input parameters of this TrainingJob. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextClassificationInputs; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextClassificationInputs { + multiLabel?: boolean | null; + } + /** + * A TrainingJob that trains and uploads an AutoML Text Extraction Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextExtraction { /** - * The metadata information. + * The input parameters of this TrainingJob. 
*/ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextExtractionInputs; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextExtractionInputs {} + /** + * A TrainingJob that trains and uploads an AutoML Text Sentiment Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextSentiment { /** - * The ID of the `base` model. If it is specified, the new model will be trained based on the `base` model. Otherwise, the new model will be trained from scratch. The `base` model must be in the same Project and Location as the new Model to train, and have the same modelType. + * The input parameters of this TrainingJob. */ - baseModelId?: string | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextSentimentInputs; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextSentimentInputs { /** - * The training budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The actual metadata.costMilliNodeHours will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using the full budget and the metadata.successfulStopReason will be `model-converged`. Note, node_hour = actual_hour * number_of_nodes_involved. For modelType `cloud`(default), the budget must be between 8,000 and 800,000 milli node hours, inclusive. The default value is 192,000 which represents one day in wall time, considering 8 nodes are used. 
For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, `mobile-tf-high-accuracy-1`, the training budget must be between 1,000 and 100,000 milli node hours, inclusive. The default value is 24,000 which represents one day in wall time on a single node that is used. + * A sentiment is expressed as an integer ordinal, where higher value means a more positive sentiment. The range of sentiments that will be used is between 0 and sentimentMax (inclusive on both ends), and all the values in the range must be represented in the dataset before a model can be created. Only the Annotations with this sentimentMax will be used for training. sentimentMax value must be between 1 and 10 (inclusive). */ - budgetMilliNodeHours?: string | null; + sentimentMax?: number | null; + } + /** + * A TrainingJob that trains and uploads an AutoML Video Action Recognition Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoActionRecognition { /** - * Use the entire training budget. This disables the early stopping feature. When false the early stopping feature is enabled, which means that AutoML Image Classification might stop training before the entire training budget has been used. + * The input parameters of this TrainingJob. */ - disableEarlyStopping?: boolean | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoActionRecognitionInputs; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoActionRecognitionInputs { modelType?: string | null; + } + /** + * A TrainingJob that trains and uploads an AutoML Video Classification Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoClassification { /** - * If false, a single-label (multi-class) Model will be trained (i.e. assuming that for each image just up to one annotation may be applicable). If true, a multi-label Model will be trained (i.e. 
assuming that for each image multiple annotations may be applicable). + * The input parameters of this TrainingJob. */ - multiLabel?: boolean | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoClassificationInputs; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoClassificationInputs { + modelType?: string | null; + } + /** + * A TrainingJob that trains and uploads an AutoML Video ObjectTracking Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoObjectTracking { /** - * Trainer type for Vision TrainRequest. + * The input parameters of this TrainingJob. */ - tunableParameter?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoObjectTrackingInputs; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoObjectTrackingInputs { + modelType?: string | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionCustomJobMetadata { /** - * The ID of `base` model for upTraining. If it is specified, the new model will be upTrained based on the `base` model for upTraining. Otherwise, the new model will be trained from scratch. The `base` model for upTraining must be in the same Project and Location as the new Model to train, and have the same modelType. + * The resource name of the CustomJob that has been created to carry out this custom task. */ - uptrainBaseModelId?: string | null; + backingCustomJob?: string | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata { + /** + * A TrainingJob that trains a custom code Model. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionCustomTask { /** - * The actual training cost of creating this model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed inputs.budgetMilliNodeHours. + * The input parameters of this CustomTask. */ - costMilliNodeHours?: string | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; /** - * For successful job completions, this is the reason why the job has finished. + * The metadata information. */ - successfulStopReason?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionCustomJobMetadata; } /** - * A TrainingJob that trains and uploads an AutoML Image Object Detection Model. + * Configuration for exporting test set predictions to a BigQuery table. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetection { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig { /** - * The input parameters of this TrainingJob. + * URI of desired destination BigQuery table. Expected format: `bq://{project_id\}:{dataset_id\}:{table\}` If not specified, then results are exported to the following auto-created BigQuery table: `{project_id\}:export_evaluated_examples_{model_name\}_{yyyy_MM_dd'T'HH_mm_ss_SSS'Z'\}.evaluated_examples` */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs; + destinationBigqueryUri?: string | null; /** - * The metadata information + * If true and an export destination is specified, then the contents of the destination are overwritten. Otherwise, if the export destination already exists, then the export operation fails. 
*/ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata; + overrideExistingTable?: boolean | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs { + /** + * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig { /** - * The training budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The actual metadata.costMilliNodeHours will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using the full budget and the metadata.successfulStopReason will be `model-converged`. Note, node_hour = actual_hour * number_of_nodes_involved. For modelType `cloud`(default), the budget must be between 20,000 and 900,000 milli node hours, inclusive. The default value is 216,000 which represents one day in wall time, considering 9 nodes are used. For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, `mobile-tf-high-accuracy-1` the training budget must be between 1,000 and 100,000 milli node hours, inclusive. The default value is 24,000 which represents one day in wall time on a single node that is used. + * A list of time series attribute column names that define the time series hierarchy. Only one level of hierarchy is supported, ex. 'region' for a hierarchy of stores or 'department' for a hierarchy of products. If multiple columns are specified, time series will be grouped by their combined values, ex. ('blue', 'large') for 'color' and 'size', up to 5 columns are accepted. If no group columns are specified, all time series are considered to be part of the same group. 
*/ - budgetMilliNodeHours?: string | null; + groupColumns?: string[] | null; /** - * Use the entire training budget. This disables the early stopping feature. When false the early stopping feature is enabled, which means that AutoML Image Object Detection might stop training before the entire training budget has been used. + * The weight of the loss for predictions aggregated over both the horizon and time series in the same hierarchy group. */ - disableEarlyStopping?: boolean | null; - modelType?: string | null; + groupTemporalTotalWeight?: number | null; /** - * Trainer type for Vision TrainRequest. + * The weight of the loss for predictions aggregated over time series in the same group. */ - tunableParameter?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter; + groupTotalWeight?: number | null; /** - * The ID of `base` model for upTraining. If it is specified, the new model will be upTrained based on the `base` model for upTraining. Otherwise, the new model will be trained from scratch. The `base` model for upTraining must be in the same Project and Location as the new Model to train, and have the same modelType. + * The weight of the loss for predictions aggregated over the horizon for a single time series. */ - uptrainBaseModelId?: string | null; + temporalTotalWeight?: number | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobMetadata { /** - * The actual training cost of creating this model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed inputs.budgetMilliNodeHours. + * The resource name of the HyperparameterTuningJob that has been created to carry out this HyperparameterTuning task. 
*/ - costMilliNodeHours?: string | null; + backingHyperparameterTuningJob?: string | null; /** - * For successful job completions, this is the reason why the job has finished. + * The resource name of the CustomJob that has been created to run the best Trial of this HyperparameterTuning task. */ - successfulStopReason?: string | null; + bestTrialBackingCustomJob?: string | null; } - /** - * A TrainingJob that trains and uploads an AutoML Image Segmentation Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentation { - /** - * The input parameters of this TrainingJob. - */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec { /** - * The metadata information. + * The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. */ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs { + maxFailedTrialCount?: number | null; /** - * The ID of the `base` model. If it is specified, the new model will be trained based on the `base` model. Otherwise, the new model will be trained from scratch. The `base` model must be in the same Project and Location as the new Model to train, and have the same modelType. + * The desired total number of Trials. */ - baseModelId?: string | null; + maxTrialCount?: number | null; /** - * The training budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The actual metadata.costMilliNodeHours will be equal or less than this value. 
If further model training ceases to provide any improvements, it will stop without using the full budget and the metadata.successfulStopReason will be `model-converged`. Note, node_hour = actual_hour * number_of_nodes_involved. Or actual_wall_clock_hours = train_budget_milli_node_hours / (number_of_nodes_involved * 1000) For modelType `cloud-high-accuracy-1`(default), the budget must be between 20,000 and 2,000,000 milli node hours, inclusive. The default value is 192,000 which represents one day in wall time (1000 milli * 24 hours * 8 nodes). + * The desired number of Trials to run in parallel. */ - budgetMilliNodeHours?: string | null; - modelType?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata { + parallelTrialCount?: number | null; /** - * The actual training cost of creating this model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed inputs.budgetMilliNodeHours. + * Study configuration of the HyperparameterTuningJob. */ - costMilliNodeHours?: string | null; + studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; /** - * For successful job completions, this is the reason why the job has finished. + * The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. */ - successfulStopReason?: string | null; + trialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; } /** - * A wrapper class which contains the tunable parameters in an AutoML Image training job. + * A TrainingJob that tunes Hypererparameters of a custom code Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter { - /** - * Optional. An unique name of pretrained model checkpoint provided in model garden, it will be mapped to a GCS location internally. 
- */ - checkpointName?: string | null; - /** - * Customizable dataset settings, used in the `model_garden_trainer`. - */ - datasetConfig?: {[key: string]: string} | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningTask { /** - * Optioinal. StudySpec of hyperparameter tuning job. Required for `model_garden_trainer`. + * The input parameters of this HyperparameterTuningTask. */ - studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec; /** - * Customizable trainer settings, used in the `model_garden_trainer`. + * The metadata information. */ - trainerConfig?: {[key: string]: string} | null; - trainerType?: string | null; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobMetadata; } /** - * A TrainingJob that trains and uploads an AutoML Tables Model. + * A TrainingJob that trains and uploads an AutoML Forecasting Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTables { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting { /** * The input parameters of this TrainingJob. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputs; + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs; /** * The metadata information. 
*/ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesMetadata; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputs { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs { /** - * Additional experiment flags for the Tables training pipeline. + * Additional experiment flags for the time series forcasting training. */ additionalExperiments?: string[] | null; /** - * Use the entire training budget. This disables the early stopping feature. By default, the early stopping feature is enabled, which means that AutoML Tables might stop training before the entire training budget has been used. + * Names of columns that are available and provided when a forecast is requested. These columns contain information for the given entity (identified by the time_series_identifier_column column) that is known at forecast. For example, predicted weather for a specific day. */ - disableEarlyStopping?: boolean | null; + availableAtForecastColumns?: string[] | null; /** - * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. + * The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the `data_granularity` field. */ - exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; + contextWindow?: string | null; /** - * Objective function the model is optimizing towards. The training process creates a model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type. 
If the field is not set, a default objective function is used. classification (binary): "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss" - Minimize log loss. "maximize-au-prc" - Maximize the area under the precision-recall curve. "maximize-precision-at-recall" - Maximize precision for a specified recall value. "maximize-recall-at-precision" - Maximize recall for a specified precision value. classification (multi-class): "minimize-log-loss" (default) - Minimize log loss. regression: "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). + * Expected difference in time granularity between rows in the data. */ - optimizationObjective?: string | null; + dataGranularity?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity; /** - * Required when optimization_objective is "maximize-recall-at-precision". Must be between 0 and 1, inclusive. + * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. */ - optimizationObjectivePrecisionValue?: number | null; + exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** - * Required when optimization_objective is "maximize-precision-at-recall". Must be between 0 and 1, inclusive. + * The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the `data_granularity` field. */ - optimizationObjectiveRecallValue?: number | null; + forecastHorizon?: string | null; /** - * The type of prediction the Model is to produce. "classification" - Predict one out of multiple target values is picked for each row. 
"regression" - Predict a value based on its relation to other values. This type is available only to columns that contain semantically numeric values, i.e. integers or floating point number, even if stored as e.g. strings. + * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. */ - predictionType?: string | null; + hierarchyConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig; /** - * The column name of the target column that the model is to predict. + * The geographical region based on which the holiday effect is applied in modeling by adding holiday categorical array feature that include all holidays matching the date. This option only allowed when data_granularity is day. By default, holiday effect modeling is disabled. To turn it on, specify the holiday region using this option. + */ + holidayRegions?: string[] | null; + /** + * Objective function the model is optimizing towards. The training process creates a model that optimizes the value of the objective function over the validation set. The supported optimization objectives: * "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - Minimize the mean absolute percentage error. + */ + optimizationObjective?: string | null; + /** + * Quantiles to use for minimize-quantile-loss `optimization_objective`. Up to 5 quantiles are allowed of values between 0 and 1, exclusive. Required if the value of optimization_objective is minimize-quantile-loss. 
Represents the percent quantiles to use for that objective. Quantiles must be unique. + */ + quantiles?: number[] | null; + /** + * The name of the column that the Model is to predict values for. This column must be unavailable at forecast. */ targetColumn?: string | null; + /** + * The name of the column that identifies time order in the time series. This column must be available at forecast. + */ + timeColumn?: string | null; + /** + * Column names that should be used as attribute columns. The value of these columns does not vary as a function of time. For example, store ID or item color. + */ + timeSeriesAttributeColumns?: string[] | null; + /** + * The name of the column that identifies the time series. + */ + timeSeriesIdentifierColumn?: string | null; /** * Required. The train budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a model for the given dataset, the training won't be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. */ @@ -14143,90 +14011,82 @@ export namespace aiplatform_v1beta1 { /** * Each transformation will apply transform function to given input column. And the result will be used for training. When creating transformation for BigQuery Struct column, the column should be flattened using "." as the delimiter. 
*/ - transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformation[]; + transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformation[]; /** - * Column name that should be used as the weight column. Higher values in this column give more importance to the row during model training. The column must have numeric values between 0 and 10000 inclusively; 0 means the row is ignored for training. If weight column field is not set, then all rows are assumed to have equal weight of 1. + * Names of columns that are unavailable when a forecast is requested. This column contains information for the given entity (identified by the time_series_identifier_column) that is unknown before the forecast For example, actual weather on a given day. */ - weightColumnName?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformation { - auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationAutoTransformation; - categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalTransformation; - numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericTransformation; - repeatedCategorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalArrayTransformation; - repeatedNumeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericArrayTransformation; - repeatedText?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextArrayTransformation; - text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextTransformation; - timestamp?: 
Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTimestampTransformation; - } - /** - * Training pipeline will infer the proper transformation based on the statistic of dataset. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationAutoTransformation { - columnName?: string | null; + unavailableAtForecastColumns?: string[] | null; + /** + * Validation options for the data validation component. The available options are: * "fail-pipeline" - default, will validate against the validation and fail the pipeline if it fails. * "ignore-validation" - ignore the results of the validation and continue + */ + validationOptions?: string | null; + /** + * Column name that should be used as the weight column. Higher values in this column give more importance to the row during model training. The column must have numeric values between 0 and 10000 inclusively; 0 means the row is ignored for training. If weight column field is not set, then all rows are assumed to have equal weight of 1. This column must be available at forecast. + */ + weightColumn?: string | null; + /** + * Config containing strategy for generating sliding windows. + */ + windowConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionWindowConfig; } /** - * Treats the column as categorical array and performs following transformation functions. * For each element in the array, convert the category name to a dictionary lookup index and generate an embedding for each index. Combine the embedding of all elements into a single embedding using the mean. * Empty arrays treated as an embedding of zeroes. + * A duration of time expressed in time granularity units. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalArrayTransformation { - columnName?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity { + /** + * The number of granularity_units between data points in the training data. If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all other values of `granularity_unit`, must be 1. + */ + quantity?: string | null; + /** + * The time granularity unit of this time period. The supported units are: * "minute" * "hour" * "day" * "week" * "month" * "year" + */ + unit?: string | null; } - /** - * Training pipeline will perform following transformation functions. * The categorical string as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. * Categories that appear less than 5 times in the training dataset are treated as the "unknown" category. The "unknown" category gets its own special lookup index and resulting embedding. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalTransformation { - columnName?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformation { + auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationAutoTransformation; + categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationCategoricalTransformation; + numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationNumericTransformation; + text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTextTransformation; + timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTimestampTransformation; } /** - * Treats the column as numerical array and performs following transformation functions. * All transformations for Numerical types applied to the average of the all elements. * The average of empty arrays is treated as zero. + * Training pipeline will infer the proper transformation based on the statistic of dataset. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericArrayTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationAutoTransformation { columnName?: string | null; - /** - * If invalid values is allowed, the training pipeline will create a boolean feature that indicated whether the value is valid. Otherwise, the training pipeline will discard the input row from trainining data. - */ - invalidValuesAllowed?: boolean | null; } /** - * Training pipeline will perform following transformation functions. 
* The value converted to float32. * The z_score of the value. * log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * A boolean value that indicates whether the value is valid. + * Training pipeline will perform following transformation functions. * The categorical string as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. * Categories that appear less than 5 times in the training dataset are treated as the "unknown" category. The "unknown" category gets its own special lookup index and resulting embedding. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationCategoricalTransformation { columnName?: string | null; - /** - * If invalid values is allowed, the training pipeline will create a boolean feature that indicated whether the value is valid. Otherwise, the training pipeline will discard the input row from trainining data. - */ - invalidValuesAllowed?: boolean | null; } /** - * Treats the column as text array and performs following transformation functions. * Concatenate all text values in the array into a single text value using a space (" ") as a delimiter, and then treat the result as a single text value. Apply the transformations for Text columns. * Empty arrays treated as an empty text. + * Training pipeline will perform following transformation functions. * The value converted to float32. * The z_score of the value. * log(value+1) when the value is greater than or equal to 0. 
Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextArrayTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationNumericTransformation { columnName?: string | null; } /** - * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Tokenize text to words. Convert each words to a dictionary lookup index and generate an embedding for each index. Combine the embedding of all elements into a single embedding using the mean. * Tokenization is based on unicode script boundaries. * Missing values get their own lookup index and resulting embedding. * Stop-words receive no special treatment and are not removed. + * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTextTransformation { columnName?: string | null; } /** - * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the * timestamp as a Categorical column. 
* Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. + * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTimestampTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTimestampTransformation { columnName?: string | null; - /** - * If invalid values is allowed, the training pipeline will create a boolean feature that indicated whether the value is valid. Otherwise, the training pipeline will discard the input row from trainining data. - */ - invalidValuesAllowed?: boolean | null; /** * The format in which that time field is expressed. The time_format must either be one of: * `unix-seconds` * `unix-milliseconds` * `unix-microseconds` * `unix-nanoseconds` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in `strftime` syntax. If time_format is not set, then the default format is RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ timeFormat?: string | null; } /** - * Model metadata specific to AutoML Tables. + * Model metadata specific to Seq2Seq Plus Forecasting. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTablesMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata { /** * BigQuery destination uri for exported evaluated examples. */ @@ -14237,242 +14097,71 @@ export namespace aiplatform_v1beta1 { trainCostMilliNodeHours?: string | null; } /** - * A TrainingJob that trains and uploads an AutoML Text Classification Model. + * A TrainingJob that trains and uploads an AutoML Forecasting Model. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextClassification { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecasting { /** * The input parameters of this TrainingJob. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextClassificationInputs; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextClassificationInputs { - multiLabel?: boolean | null; - } - /** - * A TrainingJob that trains and uploads an AutoML Text Extraction Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextExtraction { + inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputs; /** - * The input parameters of this TrainingJob. + * The metadata information. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextExtractionInputs; + metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingMetadata; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextExtractionInputs {} - /** - * A TrainingJob that trains and uploads an AutoML Text Sentiment Model. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextSentiment { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputs { /** - * The input parameters of this TrainingJob. + * Additional experiment flags for the time series forcasting training. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextSentimentInputs; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlTextSentimentInputs { + additionalExperiments?: string[] | null; /** - * A sentiment is expressed as an integer ordinal, where higher value means a more positive sentiment. The range of sentiments that will be used is between 0 and sentimentMax (inclusive on both ends), and all the values in the range must be represented in the dataset before a model can be created. Only the Annotations with this sentimentMax will be used for training. sentimentMax value must be between 1 and 10 (inclusive). + * Names of columns that are available and provided when a forecast is requested. These columns contain information for the given entity (identified by the time_series_identifier_column column) that is known at forecast. For example, predicted weather for a specific day. */ - sentimentMax?: number | null; - } - /** - * A TrainingJob that trains and uploads an AutoML Video Action Recognition Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoActionRecognition { + availableAtForecastColumns?: string[] | null; /** - * The input parameters of this TrainingJob. + * The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the `data_granularity` field. 
*/ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoActionRecognitionInputs; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoActionRecognitionInputs { - modelType?: string | null; - } - /** - * A TrainingJob that trains and uploads an AutoML Video Classification Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoClassification { + contextWindow?: string | null; /** - * The input parameters of this TrainingJob. + * Expected difference in time granularity between rows in the data. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoClassificationInputs; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoClassificationInputs { - modelType?: string | null; - } - /** - * A TrainingJob that trains and uploads an AutoML Video ObjectTracking Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoObjectTracking { + dataGranularity?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsGranularity; /** - * The input parameters of this TrainingJob. + * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoObjectTrackingInputs; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionAutoMlVideoObjectTrackingInputs { - modelType?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionCustomJobMetadata { + exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** - * The resource name of the CustomJob that has been created to carry out this custom task. 
+ * The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the `data_granularity` field. */ - backingCustomJob?: string | null; - } - /** - * A TrainingJob that trains a custom code Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionCustomTask { + forecastHorizon?: string | null; /** - * The input parameters of this CustomTask. + * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. */ - inputs?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; + hierarchyConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig; /** - * The metadata information. + * The geographical region based on which the holiday effect is applied in modeling by adding holiday categorical array feature that include all holidays matching the date. This option only allowed when data_granularity is day. By default, holiday effect modeling is disabled. To turn it on, specify the holiday region using this option. */ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionCustomJobMetadata; - } - /** - * Configuration for exporting test set predictions to a BigQuery table. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig { + holidayRegions?: string[] | null; /** - * URI of desired destination BigQuery table. Expected format: `bq://{project_id\}:{dataset_id\}:{table\}` If not specified, then results are exported to the following auto-created BigQuery table: `{project_id\}:export_evaluated_examples_{model_name\}_{yyyy_MM_dd'T'HH_mm_ss_SSS'Z'\}.evaluated_examples` + * Objective function the model is optimizing towards. The training process creates a model that optimizes the value of the objective function over the validation set. 
The supported optimization objectives: * "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - Minimize the mean absolute percentage error. */ - destinationBigqueryUri?: string | null; + optimizationObjective?: string | null; /** - * If true and an export destination is specified, then the contents of the destination are overwritten. Otherwise, if the export destination already exists, then the export operation fails. + * Quantiles to use for minimize-quantile-loss `optimization_objective`. Up to 5 quantiles are allowed of values between 0 and 1, exclusive. Required if the value of optimization_objective is minimize-quantile-loss. Represents the percent quantiles to use for that objective. Quantiles must be unique. */ - overrideExistingTable?: boolean | null; - } - /** - * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig { + quantiles?: number[] | null; /** - * A list of time series attribute column names that define the time series hierarchy. Only one level of hierarchy is supported, ex. 'region' for a hierarchy of stores or 'department' for a hierarchy of products. If multiple columns are specified, time series will be grouped by their combined values, ex. ('blue', 'large') for 'color' and 'size', up to 5 columns are accepted. If no group columns are specified, all time series are considered to be part of the same group. 
+ * The name of the column that the Model is to predict values for. This column must be unavailable at forecast. */ - groupColumns?: string[] | null; + targetColumn?: string | null; /** - * The weight of the loss for predictions aggregated over both the horizon and time series in the same hierarchy group. + * The name of the column that identifies time order in the time series. This column must be available at forecast. */ - groupTemporalTotalWeight?: number | null; + timeColumn?: string | null; /** - * The weight of the loss for predictions aggregated over time series in the same group. + * Column names that should be used as attribute columns. The value of these columns does not vary as a function of time. For example, store ID or item color. */ - groupTotalWeight?: number | null; - /** - * The weight of the loss for predictions aggregated over the horizon for a single time series. - */ - temporalTotalWeight?: number | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobMetadata { - /** - * The resource name of the HyperparameterTuningJob that has been created to carry out this HyperparameterTuning task. - */ - backingHyperparameterTuningJob?: string | null; - /** - * The resource name of the CustomJob that has been created to run the best Trial of this HyperparameterTuning task. - */ - bestTrialBackingCustomJob?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec { - /** - * The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. - */ - maxFailedTrialCount?: number | null; - /** - * The desired total number of Trials. - */ - maxTrialCount?: number | null; - /** - * The desired number of Trials to run in parallel. 
- */ - parallelTrialCount?: number | null; - /** - * Study configuration of the HyperparameterTuningJob. - */ - studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; - /** - * The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. - */ - trialJobSpec?: Schema$GoogleCloudAiplatformV1beta1CustomJobSpec; - } - /** - * A TrainingJob that tunes Hypererparameters of a custom code Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningTask { - /** - * The input parameters of this HyperparameterTuningTask. - */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec; - /** - * The metadata information. - */ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHyperparameterTuningJobMetadata; - } - /** - * A TrainingJob that trains and uploads an AutoML Forecasting Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting { - /** - * The input parameters of this TrainingJob. - */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs; - /** - * The metadata information. - */ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs { - /** - * Additional experiment flags for the time series forcasting training. - */ - additionalExperiments?: string[] | null; - /** - * Names of columns that are available and provided when a forecast is requested. These columns contain information for the given entity (identified by the time_series_identifier_column column) that is known at forecast. For example, predicted weather for a specific day. 
- */ - availableAtForecastColumns?: string[] | null; - /** - * The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the `data_granularity` field. - */ - contextWindow?: string | null; - /** - * Expected difference in time granularity between rows in the data. - */ - dataGranularity?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity; - /** - * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. - */ - exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; - /** - * The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the `data_granularity` field. - */ - forecastHorizon?: string | null; - /** - * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. - */ - hierarchyConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig; - /** - * The geographical region based on which the holiday effect is applied in modeling by adding holiday categorical array feature that include all holidays matching the date. This option only allowed when data_granularity is day. By default, holiday effect modeling is disabled. To turn it on, specify the holiday region using this option. - */ - holidayRegions?: string[] | null; - /** - * Objective function the model is optimizing towards. The training process creates a model that optimizes the value of the objective function over the validation set. The supported optimization objectives: * "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - Minimize mean-absolute error (MAE). 
* "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - Minimize the mean absolute percentage error. - */ - optimizationObjective?: string | null; - /** - * Quantiles to use for minimize-quantile-loss `optimization_objective`. Up to 5 quantiles are allowed of values between 0 and 1, exclusive. Required if the value of optimization_objective is minimize-quantile-loss. Represents the percent quantiles to use for that objective. Quantiles must be unique. - */ - quantiles?: number[] | null; - /** - * The name of the column that the Model is to predict values for. This column must be unavailable at forecast. - */ - targetColumn?: string | null; - /** - * The name of the column that identifies time order in the time series. This column must be available at forecast. - */ - timeColumn?: string | null; - /** - * Column names that should be used as attribute columns. The value of these columns does not vary as a function of time. For example, store ID or item color. - */ - timeSeriesAttributeColumns?: string[] | null; + timeSeriesAttributeColumns?: string[] | null; /** * The name of the column that identifies the time series. */ @@ -14484,7 +14173,7 @@ export namespace aiplatform_v1beta1 { /** * Each transformation will apply transform function to given input column. And the result will be used for training. When creating transformation for BigQuery Struct column, the column should be flattened using "." as the delimiter. 
*/ - transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformation[]; + transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformation[]; /** * Names of columns that are unavailable when a forecast is requested. This column contains information for the given entity (identified by the time_series_identifier_column) that is unknown before the forecast For example, actual weather on a given day. */ @@ -14505,7 +14194,7 @@ export namespace aiplatform_v1beta1 { /** * A duration of time expressed in time granularity units. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsGranularity { /** * The number of granularity_units between data points in the training data. If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all other values of `granularity_unit`, must be 1. 
*/ @@ -14515,4391 +14204,3071 @@ export namespace aiplatform_v1beta1 { */ unit?: string | null; } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformation { - auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationAutoTransformation; - categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationCategoricalTransformation; - numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationNumericTransformation; - text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTextTransformation; - timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTimestampTransformation; + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformation { + auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationAutoTransformation; + categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationCategoricalTransformation; + numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationNumericTransformation; + text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTextTransformation; + timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTimestampTransformation; } /** * Training pipeline will infer the proper transformation based on the statistic of dataset. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationAutoTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationAutoTransformation { columnName?: string | null; } /** * Training pipeline will perform following transformation functions. * The categorical string as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. * Categories that appear less than 5 times in the training dataset are treated as the "unknown" category. The "unknown" category gets its own special lookup index and resulting embedding. */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationCategoricalTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * The value converted to float32. * The z_score of the value. * log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationNumericTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTextTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTimestampTransformation { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationCategoricalTransformation { columnName?: string | null; - /** - * The format in which that time field is expressed. The time_format must either be one of: * `unix-seconds` * `unix-milliseconds` * `unix-microseconds` * `unix-nanoseconds` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in `strftime` syntax. If time_format is not set, then the default format is RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) - */ - timeFormat?: string | null; - } - /** - * Model metadata specific to Seq2Seq Plus Forecasting. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata { - /** - * BigQuery destination uri for exported evaluated examples. - */ - evaluatedDataItemsBigqueryUri?: string | null; - /** - * Output only. The actual training cost of the model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. 
- */ - trainCostMilliNodeHours?: string | null; } /** - * A TrainingJob that trains and uploads an AutoML Forecasting Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecasting { - /** - * The input parameters of this TrainingJob. - */ - inputs?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputs; - /** - * The metadata information. - */ - metadata?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingMetadata; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputs { - /** - * Additional experiment flags for the time series forcasting training. - */ - additionalExperiments?: string[] | null; - /** - * Names of columns that are available and provided when a forecast is requested. These columns contain information for the given entity (identified by the time_series_identifier_column column) that is known at forecast. For example, predicted weather for a specific day. - */ - availableAtForecastColumns?: string[] | null; - /** - * The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the `data_granularity` field. - */ - contextWindow?: string | null; - /** - * Expected difference in time granularity between rows in the data. - */ - dataGranularity?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsGranularity; - /** - * Configuration for exporting test set predictions to a BigQuery table. If this configuration is absent, then the export is not performed. - */ - exportEvaluatedDataItemsConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; - /** - * The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the `data_granularity` field. 
- */ - forecastHorizon?: string | null; - /** - * Configuration that defines the hierarchical relationship of time series and parameters for hierarchical forecasting strategies. - */ - hierarchyConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionHierarchyConfig; - /** - * The geographical region based on which the holiday effect is applied in modeling by adding holiday categorical array feature that include all holidays matching the date. This option only allowed when data_granularity is day. By default, holiday effect modeling is disabled. To turn it on, specify the holiday region using this option. - */ - holidayRegions?: string[] | null; - /** - * Objective function the model is optimizing towards. The training process creates a model that optimizes the value of the objective function over the validation set. The supported optimization objectives: * "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - Minimize the mean absolute percentage error. - */ - optimizationObjective?: string | null; - /** - * Quantiles to use for minimize-quantile-loss `optimization_objective`. Up to 5 quantiles are allowed of values between 0 and 1, exclusive. Required if the value of optimization_objective is minimize-quantile-loss. Represents the percent quantiles to use for that objective. Quantiles must be unique. - */ - quantiles?: number[] | null; - /** - * The name of the column that the Model is to predict values for. This column must be unavailable at forecast. 
- */ - targetColumn?: string | null; - /** - * The name of the column that identifies time order in the time series. This column must be available at forecast. - */ - timeColumn?: string | null; - /** - * Column names that should be used as attribute columns. The value of these columns does not vary as a function of time. For example, store ID or item color. - */ - timeSeriesAttributeColumns?: string[] | null; - /** - * The name of the column that identifies the time series. - */ - timeSeriesIdentifierColumn?: string | null; - /** - * Required. The train budget of creating this model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a model for the given dataset, the training won't be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. - */ - trainBudgetMilliNodeHours?: string | null; - /** - * Each transformation will apply transform function to given input column. And the result will be used for training. When creating transformation for BigQuery Struct column, the column should be flattened using "." as the delimiter. - */ - transformations?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformation[]; - /** - * Names of columns that are unavailable when a forecast is requested. This column contains information for the given entity (identified by the time_series_identifier_column) that is unknown before the forecast For example, actual weather on a given day. - */ - unavailableAtForecastColumns?: string[] | null; - /** - * Validation options for the data validation component. 
The available options are: * "fail-pipeline" - default, will validate against the validation and fail the pipeline if it fails. * "ignore-validation" - ignore the results of the validation and continue - */ - validationOptions?: string | null; - /** - * Column name that should be used as the weight column. Higher values in this column give more importance to the row during model training. The column must have numeric values between 0 and 10000 inclusively; 0 means the row is ignored for training. If weight column field is not set, then all rows are assumed to have equal weight of 1. This column must be available at forecast. - */ - weightColumn?: string | null; - /** - * Config containing strategy for generating sliding windows. - */ - windowConfig?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionWindowConfig; - } - /** - * A duration of time expressed in time granularity units. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsGranularity { - /** - * The number of granularity_units between data points in the training data. If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all other values of `granularity_unit`, must be 1. - */ - quantity?: string | null; - /** - * The time granularity unit of this time period. 
The supported units are: * "minute" * "hour" * "day" * "week" * "month" * "year" - */ - unit?: string | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformation { - auto?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationAutoTransformation; - categorical?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationCategoricalTransformation; - numeric?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationNumericTransformation; - text?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTextTransformation; - timestamp?: Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTimestampTransformation; - } - /** - * Training pipeline will infer the proper transformation based on the statistic of dataset. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationAutoTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * The categorical string as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. * Categories that appear less than 5 times in the training dataset are treated as the "unknown" category. The "unknown" category gets its own special lookup index and resulting embedding. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationCategoricalTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * The value converted to float32. * The z_score of the value. 
* log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationNumericTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTextTransformation { - columnName?: string | null; - } - /** - * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTimestampTransformation { - columnName?: string | null; - /** - * The format in which that time field is expressed. The time_format must either be one of: * `unix-seconds` * `unix-milliseconds` * `unix-microseconds` * `unix-nanoseconds` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in `strftime` syntax. If time_format is not set, then the default format is RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 
1985-04-12T23:20:50.52Z) - */ - timeFormat?: string | null; - } - /** - * Model metadata specific to TFT Forecasting. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingMetadata { - /** - * BigQuery destination uri for exported evaluated examples. - */ - evaluatedDataItemsBigqueryUri?: string | null; - /** - * Output only. The actual training cost of the model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. - */ - trainCostMilliNodeHours?: string | null; - } - /** - * Config that contains the strategy used to generate sliding windows in time series training. A window is a series of rows that comprise the context up to the time of prediction, and the horizon following. The corresponding row for each window marks the start of the forecast horizon. Each window is used as an input example for training/evaluation. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionWindowConfig { - /** - * Name of the column that should be used to generate sliding windows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window with the horizon starting at that row. The column will not be used as a feature in training. - */ - column?: string | null; - /** - * Maximum number of windows that should be generated across all time series. - */ - maxCount?: string | null; - /** - * Stride length used to generate input examples. Within one time series, every {$STRIDE_LENGTH\} rows will be used to generate a sliding window. - */ - strideLength?: string | null; - } - /** - * A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVertex { - /** - * X coordinate. - */ - x?: number | null; - /** - * Y coordinate. 
- */ - y?: number | null; - } - /** - * Annotation details specific to video action recognition. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoActionRecognitionAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. - */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; - /** - * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. - */ - timeSegment?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSegment; - } - /** - * Annotation details specific to video classification. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoClassificationAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. - */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; - /** - * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. - */ - timeSegment?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSegment; - } - /** - * Payload of Video DataItem. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoDataItem { - /** - * Required. Google Cloud Storage URI points to the original video in user's bucket. The video is up to 50 GB in size and up to 3 hour in duration. - */ - gcsUri?: string | null; - /** - * Output only. The mime type of the content of the video. Only the videos in below listed mime types are supported. Supported mime_type: - video/mp4 - video/avi - video/quicktime - */ - mimeType?: string | null; - } - /** - * The metadata of Datasets that contain Video DataItems. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoDatasetMetadata { - /** - * Points to a YAML file stored on Google Cloud Storage describing payload of the Video DataItems that belong to this Dataset. - */ - dataItemSchemaUri?: string | null; - /** - * Google Cloud Storage Bucket name that contains the blob data of this Dataset. - */ - gcsBucket?: string | null; - } - /** - * Annotation details specific to video object tracking. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoObjectTrackingAnnotation { - /** - * The resource Id of the AnnotationSpec that this Annotation pertains to. - */ - annotationSpecId?: string | null; - /** - * The display name of the AnnotationSpec that this Annotation pertains to. - */ - displayName?: string | null; - /** - * The instance of the object, expressed as a positive integer. Used to track the same object across different frames. - */ - instanceId?: string | null; - /** - * A time (frame) of a video to which this annotation pertains. Represented as the duration since the video's start. - */ - timeOffset?: string | null; - /** - * The rightmost coordinate of the bounding box. - */ - xMax?: number | null; - /** - * The leftmost coordinate of the bounding box. - */ - xMin?: number | null; - /** - * The bottommost coordinate of the bounding box. - */ - yMax?: number | null; - /** - * The topmost coordinate of the bounding box. - */ - yMin?: number | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVisualInspectionClassificationLabelSavedQueryMetadata { - /** - * Whether or not the classification label is multi_label. - */ - multiLabel?: boolean | null; - } - export interface Schema$GoogleCloudAiplatformV1beta1SchemaVisualInspectionMaskSavedQueryMetadata {} - /** - * Response message for DatasetService.SearchDataItems. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchDataItemsResponse { - /** - * The DataItemViews read. 
- */ - dataItemViews?: Schema$GoogleCloudAiplatformV1beta1DataItemView[]; - /** - * A token to retrieve next page of results. Pass to SearchDataItemsRequest.page_token to obtain that page. - */ - nextPageToken?: string | null; - } - /** - * Google search entry point. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchEntryPoint { - /** - * Optional. Web content snippet that can be embedded in a web page or an app webview. - */ - renderedContent?: string | null; - /** - * Optional. Base64 encoded JSON representing array of tuple. - */ - sdkBlob?: string | null; - } - /** - * Response message for FeaturestoreService.SearchFeatures. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchFeaturesResponse { - /** - * The Features matching the request. Fields returned: * `name` * `description` * `labels` * `create_time` * `update_time` - */ - features?: Schema$GoogleCloudAiplatformV1beta1Feature[]; - /** - * A token, which can be sent as SearchFeaturesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. - */ - nextPageToken?: string | null; - } - /** - * Request message for MigrationService.SearchMigratableResources. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchMigratableResourcesRequest { - /** - * A filter for your search. You can use the following types of filters: * Resource type filters. The following strings filter for a specific type of MigratableResource: * `ml_engine_model_version:*` * `automl_model:*` * `automl_dataset:*` * `data_labeling_dataset:*` * "Migrated or not" filters. The following strings filter for resources that either have or have not already been migrated: * `last_migrate_time:*` filters for migrated resources. * `NOT last_migrate_time:*` filters for not yet migrated resources. - */ - filter?: string | null; - /** - * The standard page size. The default and maximum value is 100. - */ - pageSize?: number | null; - /** - * The standard page token. 
- */ - pageToken?: string | null; - } - /** - * Response message for MigrationService.SearchMigratableResources. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchMigratableResourcesResponse { - /** - * All migratable resources that can be migrated to the location specified in the request. - */ - migratableResources?: Schema$GoogleCloudAiplatformV1beta1MigratableResource[]; - /** - * The standard next-page token. The migratable_resources may not fill page_size in SearchMigratableResourcesRequest even when there are subsequent pages. - */ - nextPageToken?: string | null; - } - /** - * Request message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesRequest { - /** - * Required. The DeployedModel ID of the [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - */ - deployedModelId?: string | null; - /** - * The latest timestamp of stats being generated. If not set, indicates feching stats till the latest possible one. - */ - endTime?: string | null; - /** - * The feature display name. If specified, only return the stats belonging to this feature. Format: ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name, example: "user_destination". - */ - featureDisplayName?: string | null; - /** - * Required. Objectives of the stats to retrieve. - */ - objectives?: Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective[]; - /** - * The standard list page size. - */ - pageSize?: number | null; - /** - * A page token received from a previous JobService.SearchModelDeploymentMonitoringStatsAnomalies call. - */ - pageToken?: string | null; - /** - * The earliest timestamp of stats being generated. If not set, indicates fetching stats till the earliest possible one. - */ - startTime?: string | null; - } - /** - * Stats requested for specific objective. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective { - /** - * If set, all attribution scores between SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time and SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time are fetched, and page token doesn't take effect in this case. Only used to retrieve attribution score for the top Features which has the highest attribution score in the latest monitoring run. - */ - topFeatureCount?: number | null; - type?: string | null; - } - /** - * Response message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesResponse { - /** - * Stats retrieved for requested objectives. There are at most 1000 ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats in the response. - */ - monitoringStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomalies[]; - /** - * The page token that can be used by the next JobService.SearchModelDeploymentMonitoringStatsAnomalies call. - */ - nextPageToken?: string | null; - } - /** - * Request message for ModelMonitoringService.SearchModelMonitoringAlerts. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringAlertsRequest { - /** - * If non-empty, returns the alerts in this time interval. - */ - alertTimeInterval?: Schema$GoogleTypeInterval; - /** - * If non-empty, returns the alerts of this model monitoring job. - */ - modelMonitoringJob?: string | null; - /** - * If non-empty, returns the alerts of this objective type. Supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` - */ - objectiveType?: string | null; - /** - * The standard list page size. - */ - pageSize?: number | null; - /** - * A page token received from a previous ModelMonitoringService.SearchModelMonitoringAlerts call. 
- */ - pageToken?: string | null; - /** - * If non-empty, returns the alerts of this stats_name. - */ - statsName?: string | null; - } - /** - * Response message for ModelMonitoringService.SearchModelMonitoringAlerts. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringAlertsResponse { - /** - * Alerts retrieved for the requested objectives. Sorted by alert time descendingly. - */ - modelMonitoringAlerts?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlert[]; - /** - * The page token that can be used by the next ModelMonitoringService.SearchModelMonitoringAlerts call. - */ - nextPageToken?: string | null; - /** - * The total number of alerts retrieved by the requested objectives. - */ - totalNumberAlerts?: string | null; - } - /** - * Filter for searching ModelMonitoringStats. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilter { - /** - * Tabular statistics filter. - */ - tabularStatsFilter?: Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilterTabularStatsFilter; - } - /** - * Tabular statistics filter. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilterTabularStatsFilter { - /** - * Specify the algorithm type used for distance calculation, eg: jensen_shannon_divergence, l_infinity. - */ - algorithm?: string | null; - /** - * From a particular monitoring job. - */ - modelMonitoringJob?: string | null; - /** - * From a particular monitoring schedule. - */ - modelMonitoringSchedule?: string | null; - /** - * One of the supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` - */ - objectiveType?: string | null; - /** - * If not specified, will return all the stats_names. - */ - statsName?: string | null; - } - /** - * Request message for ModelMonitoringService.SearchModelMonitoringStats. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsRequest { - /** - * The standard list page size. - */ - pageSize?: number | null; - /** - * A page token received from a previous ModelMonitoringService.SearchModelMonitoringStats call. - */ - pageToken?: string | null; - /** - * Filter for search different stats. - */ - statsFilter?: Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilter; - /** - * The time interval for which results should be returned. - */ - timeInterval?: Schema$GoogleTypeInterval; - } - /** - * Response message for ModelMonitoringService.SearchModelMonitoringStats. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsResponse { - /** - * Stats retrieved for requested objectives. - */ - monitoringStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStats[]; - /** - * The page token that can be used by the next ModelMonitoringService.SearchModelMonitoringStats call. - */ - nextPageToken?: string | null; - } - /** - * The request message for FeatureOnlineStoreService.SearchNearestEntities. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchNearestEntitiesRequest { - /** - * Required. The query. - */ - query?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQuery; - /** - * Optional. If set to true, the full entities (including all vector values and metadata) of the nearest neighbors are returned; otherwise only entity id of the nearest neighbors will be returned. Note that returning full entities will significantly increase the latency and cost of the query. - */ - returnFullEntity?: boolean | null; - } - /** - * Response message for FeatureOnlineStoreService.SearchNearestEntities - */ - export interface Schema$GoogleCloudAiplatformV1beta1SearchNearestEntitiesResponse { - /** - * The nearest neighbors of the query entity. 
- */ - nearestNeighbors?: Schema$GoogleCloudAiplatformV1beta1NearestNeighbors; - } - /** - * Configuration for the use of custom service account to run the workloads. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ServiceAccountSpec { - /** - * Required. If true, custom user-managed service account is enforced to run any workloads (for example, Vertex Jobs) on the resource. Otherwise, uses the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). - */ - enableCustomServiceAccount?: boolean | null; - /** - * Optional. Required when all below conditions are met * `enable_custom_service_account` is true; * any runtime is specified via `ResourceRuntimeSpec` on creation time, for example, Ray The users must have `iam.serviceAccounts.actAs` permission on this service account and then the specified runtime containers will run as it. Do not set this field if you want to submit jobs using custom service account to this PersistentResource after creation, but only specify the `service_account` inside the job. - */ - serviceAccount?: string | null; - } - /** - * A set of Shielded Instance options. See [Images using supported Shielded VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). - */ - export interface Schema$GoogleCloudAiplatformV1beta1ShieldedVmConfig { - /** - * Defines whether the instance has [Secure Boot](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot) enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. - */ - enableSecureBoot?: boolean | null; - } - /** - * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. 
Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf - */ - export interface Schema$GoogleCloudAiplatformV1beta1SmoothGradConfig { - /** - * This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features. - */ - featureNoiseSigma?: Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigma; - /** - * This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. - */ - noiseSigma?: number | null; - /** - * The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. - */ - noisySampleCount?: number | null; - } - /** - * SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist managers and workers. Managers are responsible for managing the workers in this pool as well as customers' data labeling jobs associated with this pool. Customers create specialist pool as well as start data labeling jobs on Cloud, managers and workers handle the jobs using CrowdCompute console. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1SpecialistPool { - /** - * Required. The user-defined name of the SpecialistPool. The name can be up to 128 characters long and can consist of any UTF-8 characters. This field should be unique on project-level. - */ - displayName?: string | null; - /** - * Required. The resource name of the SpecialistPool. - */ - name?: string | null; - /** - * Output only. The resource name of the pending data labeling jobs. - */ - pendingDataLabelingJobs?: string[] | null; - /** - * The email addresses of the managers in the SpecialistPool. - */ - specialistManagerEmails?: string[] | null; - /** - * Output only. The number of managers in this SpecialistPool. - */ - specialistManagersCount?: number | null; - /** - * The email addresses of workers in the SpecialistPool. - */ - specialistWorkerEmails?: string[] | null; - } - /** - * Metadata information for NotebookService.StartNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StartNotebookRuntimeOperationMetadata { - /** - * The operation generic information. - */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - /** - * A human-readable message that shows the intermediate progress details of NotebookRuntime. - */ - progressMessage?: string | null; - } - /** - * Request message for NotebookService.StartNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StartNotebookRuntimeRequest {} - /** - * Request message for VizierService.StopTrial. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StopTrialRequest {} - /** - * Assigns input data to the training, validation, and test sets so that the distribution of values found in the categorical column (as specified by the `key` field) is mirrored within each split. The fraction values determine the relative sizes of the splits. 
For example, if the specified column has three values, with 50% of the rows having value "A", 25% value "B", and 25% value "C", and the split fractions are specified as 80/10/10, then the training set will constitute 80% of the training data, with about 50% of the training set rows having the value "A" for the specified column, about 25% having the value "B", and about 25% having the value "C". Only the top 500 occurring values are used; any values not in the top 500 values are randomly assigned to a split. If less than three rows contain a specific value, those rows are randomly assigned. Supported only for tabular Datasets. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StratifiedSplit { - /** - * Required. The key is a name of one of the Dataset's data columns. The key provided must be for a categorical column. - */ - key?: string | null; - /** - * The fraction of the input data that is to be used to evaluate the Model. - */ - testFraction?: number | null; - /** - * The fraction of the input data that is to be used to train the Model. - */ - trainingFraction?: number | null; - /** - * The fraction of the input data that is to be used to validate the Model. - */ - validationFraction?: number | null; - } - /** - * Request message for FeatureOnlineStoreService.StreamingFetchFeatureValues. For the entities requested, all features under the requested feature view will be returned. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StreamingFetchFeatureValuesRequest { - /** - * Specify response data format. If not set, KeyValue format will be used. - */ - dataFormat?: string | null; - dataKeys?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey[]; - } - /** - * Response message for FeatureOnlineStoreService.StreamingFetchFeatureValues. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1StreamingFetchFeatureValuesResponse { - data?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponse[]; - dataKeysWithError?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey[]; - /** - * Response status. If OK, then StreamingFetchFeatureValuesResponse.data will be populated. Otherwise StreamingFetchFeatureValuesResponse.data_keys_with_error will be populated with the appropriate data keys. The error only applies to the listed data keys - the stream will remain open for further FeatureOnlineStoreService.StreamingFetchFeatureValuesRequest requests. - */ - status?: Schema$GoogleRpcStatus; - } - /** - * Request message for PredictionService.StreamingPredict. The first message must contain endpoint field and optionally input. The subsequent messages must contain input. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StreamingPredictRequest { - /** - * The prediction input. - */ - inputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; - /** - * The parameters that govern the prediction. - */ - parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; - } - /** - * Response message for PredictionService.StreamingPredict. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StreamingPredictResponse { - /** - * The prediction output. - */ - outputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; - /** - * The parameters that govern the prediction. - */ - parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; - } - /** - * Request message for FeaturestoreOnlineServingService.StreamingFeatureValuesRead. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StreamingReadFeatureValuesRequest { - /** - * Required. IDs of entities to read Feature values of. The maximum number of IDs is 100. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. - */ - entityIds?: string[] | null; - /** - * Required. 
Selector choosing Features of the target EntityType. Feature IDs will be deduplicated. - */ - featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; - } - /** - * A list of string values. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StringArray { - /** - * A list of string values. - */ - values?: string[] | null; - } - /** - * A message representing a Study. - */ - export interface Schema$GoogleCloudAiplatformV1beta1Study { - /** - * Output only. Time at which the study was created. - */ - createTime?: string | null; - /** - * Required. Describes the Study, default value is empty string. - */ - displayName?: string | null; - /** - * Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. - */ - inactiveReason?: string | null; - /** - * Output only. The name of a study. The study's globally unique identifier. Format: `projects/{project\}/locations/{location\}/studies/{study\}` - */ - name?: string | null; - /** - * Output only. The detailed state of a Study. - */ - state?: string | null; - /** - * Required. Configuration of the Study. - */ - studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; - } - /** - * Represents specification of a Study. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpec { - /** - * The search algorithm specified for the Study. - */ - algorithm?: string | null; - /** - * The automated early stopping spec using convex stopping rule. - */ - convexAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecConvexAutomatedStoppingSpec; - /** - * Deprecated. The automated early stopping using convex stopping rule. - */ - convexStopConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecConvexStopConfig; - /** - * The automated early stopping spec using decay curve rule. 
- */ - decayCurveStoppingSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecDecayCurveAutomatedStoppingSpec; - /** - * Describe which measurement selection type will be used - */ - measurementSelectionType?: string | null; - /** - * The automated early stopping spec using median rule. - */ - medianAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecMedianAutomatedStoppingSpec; - /** - * Required. Metric specs for the Study. - */ - metrics?: Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpec[]; - /** - * The observation noise level of the study. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - observationNoise?: string | null; - /** - * Required. The set of parameters to tune. - */ - parameters?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpec[]; - /** - * Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. - */ - studyStoppingConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig; - /** - * The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob - */ - transferLearningConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig; - } - /** - * Configuration for ConvexAutomatedStoppingSpec. When there are enough completed trials (configured by min_measurement_count), for pending trials with enough measurements and steps, the policy first computes an overestimate of the objective value at max_num_steps according to the slope of the incomplete objective value curve. No prediction can be made if the curve is completely flat. If the overestimation is worse than the best objective value of the completed trials, this pending trial will be early-stopped, but a last measurement will be added to the pending trial with max_num_steps and predicted objective value from the autoregression model. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecConvexAutomatedStoppingSpec { - /** - * The hyper-parameter name used in the tuning job that stands for learning rate. Leave it blank if learning rate is not in a parameter in tuning. The learning_rate is used to estimate the objective value of the ongoing trial. - */ - learningRateParameterName?: string | null; - /** - * Steps used in predicting the final objective for early stopped trials. In general, it's set to be the same as the defined steps in training / tuning. If not defined, it will learn it from the completed trials. When use_steps is false, this field is set to the maximum elapsed seconds. - */ - maxStepCount?: string | null; - /** - * The minimal number of measurements in a Trial. Early-stopping checks will not trigger if less than min_measurement_count+1 completed trials or pending trials with less than min_measurement_count measurements. If not defined, the default value is 5. - */ - minMeasurementCount?: string | null; - /** - * Minimum number of steps for a trial to complete. Trials which do not have a measurement with step_count \> min_step_count won't be considered for early stopping. It's ok to set it to 0, and a trial can be early stopped at any stage. By default, min_step_count is set to be one-tenth of the max_step_count. When use_elapsed_duration is true, this field is set to the minimum elapsed seconds. - */ - minStepCount?: string | null; - /** - * ConvexAutomatedStoppingSpec by default only updates the trials that needs to be early stopped using a newly trained auto-regressive model. When this flag is set to True, all stopped trials from the beginning are potentially updated in terms of their `final_measurement`. Also, note that the training logic of autoregressive models is different in this case. Enabling this option has shown better results and this may be the default option in the future. 
- */ - updateAllStoppedTrials?: boolean | null; - /** - * This bool determines whether or not the rule is applied based on elapsed_secs or steps. If use_elapsed_duration==false, the early stopping decision is made according to the predicted objective values according to the target steps. If use_elapsed_duration==true, elapsed_secs is used instead of steps. Also, in this case, the parameters max_num_steps and min_num_steps are overloaded to contain max_elapsed_seconds and min_elapsed_seconds. - */ - useElapsedDuration?: boolean | null; - } - /** - * Configuration for ConvexStopPolicy. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecConvexStopConfig { - /** - * The number of Trial measurements used in autoregressive model for value prediction. A trial won't be considered early stopping if has fewer measurement points. - */ - autoregressiveOrder?: string | null; - /** - * The hyper-parameter name used in the tuning job that stands for learning rate. Leave it blank if learning rate is not in a parameter in tuning. The learning_rate is used to estimate the objective value of the ongoing trial. - */ - learningRateParameterName?: string | null; - /** - * Steps used in predicting the final objective for early stopped trials. In general, it's set to be the same as the defined steps in training / tuning. When use_steps is false, this field is set to the maximum elapsed seconds. - */ - maxNumSteps?: string | null; - /** - * Minimum number of steps for a trial to complete. Trials which do not have a measurement with num_steps \> min_num_steps won't be considered for early stopping. It's ok to set it to 0, and a trial can be early stopped at any stage. By default, min_num_steps is set to be one-tenth of the max_num_steps. When use_steps is false, this field is set to the minimum elapsed seconds. - */ - minNumSteps?: string | null; - /** - * This bool determines whether or not the rule is applied based on elapsed_secs or steps. 
If use_seconds==false, the early stopping decision is made according to the predicted objective values according to the target steps. If use_seconds==true, elapsed_secs is used instead of steps. Also, in this case, the parameters max_num_steps and min_num_steps are overloaded to contain max_elapsed_seconds and min_elapsed_seconds. - */ - useSeconds?: boolean | null; - } - /** - * The decay curve automated stopping rule builds a Gaussian Process Regressor to predict the final objective value of a Trial based on the already completed Trials and the intermediate measurements of the current Trial. Early stopping is requested for the current Trial if there is very low probability to exceed the optimal value found so far. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecDecayCurveAutomatedStoppingSpec { - /** - * True if Measurement.elapsed_duration is used as the x-axis of each Trials Decay Curve. Otherwise, Measurement.step_count will be used as the x-axis. - */ - useElapsedDuration?: boolean | null; - } - /** - * The median automated stopping rule stops a pending Trial if the Trial's best objective_value is strictly below the median 'performance' of all completed Trials reported up to the Trial's last measurement. Currently, 'performance' refers to the running average of the objective values reported by the Trial in each measurement. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecMedianAutomatedStoppingSpec { - /** - * True if median automated stopping rule applies on Measurement.elapsed_duration. It means that elapsed_duration field of latest measurement of current Trial is used to compute median objective value for each completed Trials. - */ - useElapsedDuration?: boolean | null; - } - /** - * Represents a metric to optimize. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpec { - /** - * Required. The optimization goal of the metric. - */ - goal?: string | null; - /** - * Required. 
The ID of the metric. Must not contain whitespaces and must be unique amongst all MetricSpecs. - */ - metricId?: string | null; - /** - * Used for safe search. In the case, the metric will be a safety metric. You must provide a separate metric for objective metric. - */ - safetyConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpecSafetyMetricConfig; - } - /** - * Used in safe optimization to specify threshold levels and risk tolerance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpecSafetyMetricConfig { - /** - * Desired minimum fraction of safe trials (over total number of trials) that should be targeted by the algorithm at any time during the study (best effort). This should be between 0.0 and 1.0 and a value of 0.0 means that there is no minimum and an algorithm proceeds without targeting any specific fraction. A value of 1.0 means that the algorithm attempts to only Suggest safe Trials. - */ - desiredMinSafeTrialsFraction?: number | null; - /** - * Safety threshold (boundary value between safe and unsafe). NOTE that if you leave SafetyMetricConfig unset, a default value of 0 will be used. - */ - safetyThreshold?: number | null; - } - /** - * Represents a single parameter to optimize. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpec { - /** - * The value spec for a 'CATEGORICAL' parameter. - */ - categoricalValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecCategoricalValueSpec; - /** - * A conditional parameter node is active if the parameter's value matches the conditional node's parent_value_condition. If two items in conditional_parameter_specs have the same name, they must have disjoint parent_value_condition. - */ - conditionalParameterSpecs?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpec[]; - /** - * The value spec for a 'DISCRETE' parameter. 
- */ - discreteValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDiscreteValueSpec; - /** - * The value spec for a 'DOUBLE' parameter. - */ - doubleValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDoubleValueSpec; - /** - * The value spec for an 'INTEGER' parameter. - */ - integerValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecIntegerValueSpec; - /** - * Required. The ID of the parameter. Must not contain whitespaces and must be unique amongst all ParameterSpecs. - */ - parameterId?: string | null; - /** - * How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. - */ - scaleType?: string | null; - } - /** - * Value specification for a parameter in `CATEGORICAL` type. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecCategoricalValueSpec { - /** - * A default value for a `CATEGORICAL` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: string | null; - /** - * Required. The list of possible categories. - */ - values?: string[] | null; - } - /** - * Represents a parameter spec with condition from its parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpec { - /** - * Required. The spec for a conditional parameter. - */ - parameterSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpec; - /** - * The spec for matching values from a parent parameter of `CATEGORICAL` type. - */ - parentCategoricalValues?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition; - /** - * The spec for matching values from a parent parameter of `DISCRETE` type. 
- */ - parentDiscreteValues?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition; - /** - * The spec for matching values from a parent parameter of `INTEGER` type. - */ - parentIntValues?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecIntValueCondition; - } - /** - * Represents the spec to match categorical values from parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition { - /** - * Required. Matches values of the parent parameter of 'CATEGORICAL' type. All values must exist in `categorical_value_spec` of parent parameter. - */ - values?: string[] | null; - } - /** - * Represents the spec to match discrete values from parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition { - /** - * Required. Matches values of the parent parameter of 'DISCRETE' type. All values must exist in `discrete_value_spec` of parent parameter. The Epsilon of the value matching is 1e-10. - */ - values?: number[] | null; - } - /** - * Represents the spec to match integer values from parent parameter. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { - /** - * Required. Matches values of the parent parameter of 'INTEGER' type. All values must lie in `integer_value_spec` of parent parameter. - */ - values?: string[] | null; - } - /** - * Value specification for a parameter in `DISCRETE` type. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDiscreteValueSpec { - /** - * A default value for a `DISCRETE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. It automatically rounds to the nearest feasible discrete point. 
Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: number | null; - /** - * Required. A list of possible values. The list should be in increasing order and at least 1e-10 apart. For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. This list should not contain more than 1,000 values. - */ - values?: number[] | null; - } - /** - * Value specification for a parameter in `DOUBLE` type. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDoubleValueSpec { - /** - * A default value for a `DOUBLE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: number | null; - /** - * Required. Inclusive maximum value of the parameter. - */ - maxValue?: number | null; - /** - * Required. Inclusive minimum value of the parameter. - */ - minValue?: number | null; - } - /** - * Value specification for a parameter in `INTEGER` type. - */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecIntegerValueSpec { - /** - * A default value for an `INTEGER` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. - */ - defaultValue?: string | null; - /** - * Required. Inclusive maximum value of the parameter. - */ - maxValue?: string | null; - /** - * Required. Inclusive minimum value of the parameter. - */ - minValue?: string | null; - } - /** - * The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig { - /** - * If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. - */ - maxDurationNoProgress?: string | null; - /** - * If the specified time or duration has passed, stop the study. - */ - maximumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1beta1StudyTimeConstraint; - /** - * If there are more than this many trials, stop the study. - */ - maxNumTrials?: number | null; - /** - * If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. - */ - maxNumTrialsNoProgress?: number | null; - /** - * Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. 
If the specified time or duration has not passed, do not stop the study. - */ - minimumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1beta1StudyTimeConstraint; - /** - * If there are fewer than this many COMPLETED trials, do not stop the study. - */ - minNumTrials?: number | null; + * Training pipeline will perform following transformation functions. * The value converted to float32. * The z_score of the value. * log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. * z_score of log(value+1) when the value is greater than or equal to 0. Otherwise, this transformation is not applied and the value is considered a missing value. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationNumericTransformation { + columnName?: string | null; + } + /** + * Training pipeline will perform following transformation functions. * The text as is--no change to case, punctuation, spelling, tense, and so on. * Convert the category name to a dictionary lookup index and generate an embedding for each index. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTextTransformation { + columnName?: string | null; + } + /** + * Training pipeline will perform following transformation functions. * Apply the transformation functions for Numerical columns. * Determine the year, month, day,and weekday. Treat each value from the timestamp as a Categorical column. * Invalid numerical values (for example, values that fall outside of a typical timestamp range, or are extreme values) receive no special treatment and are not removed. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTimestampTransformation { + columnName?: string | null; /** - * If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + * The format in which that time field is expressed. The time_format must either be one of: * `unix-seconds` * `unix-milliseconds` * `unix-microseconds` * `unix-nanoseconds` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in `strftime` syntax. If time_format is not set, then the default format is RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ - shouldStopAsap?: boolean | null; + timeFormat?: string | null; } /** - * This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. + * Model metadata specific to TFT Forecasting. */ - export interface Schema$GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionTftForecastingMetadata { /** - * Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. + * BigQuery destination uri for exported evaluated examples. */ - disableTransferLearning?: boolean | null; + evaluatedDataItemsBigqueryUri?: string | null; /** - * Output only. Names of previously completed studies + * Output only. The actual training cost of the model, expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. 
*/ - priorStudyNames?: string[] | null; + trainCostMilliNodeHours?: string | null; } /** - * Time-based Constraint for Study + * Config that contains the strategy used to generate sliding windows in time series training. A window is a series of rows that comprise the context up to the time of prediction, and the horizon following. The corresponding row for each window marks the start of the forecast horizon. Each window is used as an input example for training/evaluation. */ - export interface Schema$GoogleCloudAiplatformV1beta1StudyTimeConstraint { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaTrainingjobDefinitionWindowConfig { /** - * Compares the wallclock time to this time. Must use UTC timezone. + * Name of the column that should be used to generate sliding windows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window with the horizon starting at that row. The column will not be used as a feature in training. */ - endTime?: string | null; + column?: string | null; /** - * Counts the wallclock time passed since the creation of this Study. + * Maximum number of windows that should be generated across all time series. */ - maxDuration?: string | null; + maxCount?: string | null; + /** + * Stride length used to generate input examples. Within one time series, every {$STRIDE_LENGTH\} rows will be used to generate a sliding window. + */ + strideLength?: string | null; } /** - * Details of operations that perform Trials suggestion. + * A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1. */ - export interface Schema$GoogleCloudAiplatformV1beta1SuggestTrialsMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVertex { /** - * The identifier of the client that is requesting the suggestion. 
If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. + * X coordinate. */ - clientId?: string | null; + x?: number | null; /** - * Operation metadata for suggesting Trials. + * Y coordinate. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + y?: number | null; } /** - * Request message for VizierService.SuggestTrials. + * Annotation details specific to video action recognition. */ - export interface Schema$GoogleCloudAiplatformV1beta1SuggestTrialsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoActionRecognitionAnnotation { /** - * Required. The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - clientId?: string | null; + annotationSpecId?: string | null; /** - * Optional. This allows you to specify the "context" for a Trial; a context is a slice (a subspace) of the search space. Typical uses for contexts: 1) You are using Vizier to tune a server for best performance, but there's a strong weekly cycle. The context specifies the day-of-week. This allows Tuesday to generalize from Wednesday without assuming that everything is identical. 2) Imagine you're optimizing some medical treatment for people. As they walk in the door, you know certain facts about them (e.g. sex, weight, height, blood-pressure). Put that information in the context, and Vizier will adapt its suggestions to the patient. 3) You want to do a fair A/B test efficiently. Specify the "A" and "B" conditions as contexts, and Vizier will generalize between "A" and "B" conditions. 
If they are similar, this will allow Vizier to converge to the optimum faster than if "A" and "B" were separate Studies. NOTE: You can also enter contexts as REQUESTED Trials, e.g. via the CreateTrial() RPC; that's the asynchronous option where you don't need a close association between contexts and suggestions. NOTE: All the Parameters you set in a context MUST be defined in the Study. NOTE: You must supply 0 or $suggestion_count contexts. If you don't supply any contexts, Vizier will make suggestions from the full search space specified in the StudySpec; if you supply a full set of context, each suggestion will match the corresponding context. NOTE: A Context with no features set matches anything, and allows suggestions from the full search space. NOTE: Contexts MUST lie within the search space specified in the StudySpec. It's an error if they don't. NOTE: Contexts preferentially match ACTIVE then REQUESTED trials before new suggestions are generated. NOTE: Generation of suggestions involves a match between a Context and (optionally) a REQUESTED trial; if that match is not fully specified, a suggestion will be geneated in the merged subspace. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - contexts?: Schema$GoogleCloudAiplatformV1beta1TrialContext[]; + displayName?: string | null; /** - * Required. The number of suggestions requested. It must be positive. + * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. */ - suggestionCount?: number | null; + timeSegment?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSegment; } /** - * Response message for VizierService.SuggestTrials. + * Annotation details specific to video classification. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SuggestTrialsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoClassificationAnnotation { /** - * The time at which operation processing completed. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - endTime?: string | null; + annotationSpecId?: string | null; /** - * The time at which the operation was started. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - startTime?: string | null; + displayName?: string | null; /** - * The state of the Study. + * This Annotation applies to the time period represented by the TimeSegment. If it's not set, the Annotation applies to the whole video. */ - studyState?: string | null; + timeSegment?: Schema$GoogleCloudAiplatformV1beta1SchemaTimeSegment; + } + /** + * Payload of Video DataItem. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoDataItem { /** - * A list of Trials. + * Required. Google Cloud Storage URI points to the original video in user's bucket. The video is up to 50 GB in size and up to 3 hour in duration. */ - trials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; + gcsUri?: string | null; + /** + * Output only. The mime type of the content of the video. Only the videos in below listed mime types are supported. Supported mime_type: - video/mp4 - video/avi - video/quicktime + */ + mimeType?: string | null; } /** - * Input for summarization helpfulness metric. + * The metadata of Datasets that contain Video DataItems. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoDatasetMetadata { /** - * Required. Summarization helpfulness instance. + * Points to a YAML file stored on Google Cloud Storage describing payload of the Video DataItems that belong to this Dataset. 
*/ - instance?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance; + dataItemSchemaUri?: string | null; /** - * Required. Spec for summarization helpfulness score metric. + * Google Cloud Storage Bucket name that contains the blob data of this Dataset. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec; + gcsBucket?: string | null; } /** - * Spec for summarization helpfulness instance. + * Annotation details specific to video object tracking. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance { + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVideoObjectTrackingAnnotation { /** - * Required. Text to be summarized. + * The resource Id of the AnnotationSpec that this Annotation pertains to. */ - context?: string | null; + annotationSpecId?: string | null; /** - * Optional. Summarization prompt for LLM. + * The display name of the AnnotationSpec that this Annotation pertains to. */ - instruction?: string | null; + displayName?: string | null; /** - * Required. Output of the evaluated model. + * The instance of the object, expressed as a positive integer. Used to track the same object across different frames. */ - prediction?: string | null; + instanceId?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * A time (frame) of a video to which this annotation pertains. Represented as the duration since the video's start. */ - reference?: string | null; - } - /** - * Spec for summarization helpfulness result. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult { + timeOffset?: string | null; /** - * Output only. Confidence for summarization helpfulness score. + * The rightmost coordinate of the bounding box. */ - confidence?: number | null; + xMax?: number | null; /** - * Output only. Explanation for summarization helpfulness score. + * The leftmost coordinate of the bounding box. 
*/ - explanation?: string | null; + xMin?: number | null; /** - * Output only. Summarization Helpfulness score. + * The bottommost coordinate of the bounding box. */ - score?: number | null; - } - /** - * Spec for summarization helpfulness score metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec { + yMax?: number | null; /** - * Optional. Whether to use instance.reference to compute summarization helpfulness. + * The topmost coordinate of the bounding box. */ - useReference?: boolean | null; + yMin?: number | null; + } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVisualInspectionClassificationLabelSavedQueryMetadata { /** - * Optional. Which version to use for evaluation. + * Whether or not the classification label is multi_label. */ - version?: number | null; + multiLabel?: boolean | null; } + export interface Schema$GoogleCloudAiplatformV1beta1SchemaVisualInspectionMaskSavedQueryMetadata {} /** - * Input for summarization quality metric. + * Response message for DatasetService.SearchDataItems. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInput { + export interface Schema$GoogleCloudAiplatformV1beta1SearchDataItemsResponse { /** - * Required. Summarization quality instance. + * The DataItemViews read. */ - instance?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInstance; + dataItemViews?: Schema$GoogleCloudAiplatformV1beta1DataItemView[]; /** - * Required. Spec for summarization quality score metric. + * A token to retrieve next page of results. Pass to SearchDataItemsRequest.page_token to obtain that page. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualitySpec; + nextPageToken?: string | null; } /** - * Spec for summarization quality instance. + * Google search entry point. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInstance { - /** - * Required. Text to be summarized. 
- */ - context?: string | null; - /** - * Required. Summarization prompt for LLM. - */ - instruction?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SearchEntryPoint { /** - * Required. Output of the evaluated model. + * Optional. Web content snippet that can be embedded in a web page or an app webview. */ - prediction?: string | null; + renderedContent?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * Optional. Base64 encoded JSON representing array of tuple. */ - reference?: string | null; + sdkBlob?: string | null; } /** - * Spec for summarization quality result. + * Response message for FeaturestoreService.SearchFeatures. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualityResult { - /** - * Output only. Confidence for summarization quality score. - */ - confidence?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SearchFeaturesResponse { /** - * Output only. Explanation for summarization quality score. + * The Features matching the request. Fields returned: * `name` * `description` * `labels` * `create_time` * `update_time` */ - explanation?: string | null; + features?: Schema$GoogleCloudAiplatformV1beta1Feature[]; /** - * Output only. Summarization Quality score. + * A token, which can be sent as SearchFeaturesRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. */ - score?: number | null; + nextPageToken?: string | null; } /** - * Spec for summarization quality score metric. + * Request message for MigrationService.SearchMigratableResources. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualitySpec { + export interface Schema$GoogleCloudAiplatformV1beta1SearchMigratableResourcesRequest { /** - * Optional. Whether to use instance.reference to compute summarization quality. + * A filter for your search. You can use the following types of filters: * Resource type filters. 
The following strings filter for a specific type of MigratableResource: * `ml_engine_model_version:*` * `automl_model:*` * `automl_dataset:*` * `data_labeling_dataset:*` * "Migrated or not" filters. The following strings filter for resources that either have or have not already been migrated: * `last_migrate_time:*` filters for migrated resources. * `NOT last_migrate_time:*` filters for not yet migrated resources. */ - useReference?: boolean | null; + filter?: string | null; /** - * Optional. Which version to use for evaluation. + * The standard page size. The default and maximum value is 100. */ - version?: number | null; + pageSize?: number | null; + /** + * The standard page token. + */ + pageToken?: string | null; } /** - * Input for summarization verbosity metric. + * Response message for MigrationService.SearchMigratableResources. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInput { + export interface Schema$GoogleCloudAiplatformV1beta1SearchMigratableResourcesResponse { /** - * Required. Summarization verbosity instance. + * All migratable resources that can be migrated to the location specified in the request. */ - instance?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance; + migratableResources?: Schema$GoogleCloudAiplatformV1beta1MigratableResource[]; /** - * Required. Spec for summarization verbosity score metric. + * The standard next-page token. The migratable_resources may not fill page_size in SearchMigratableResourcesRequest even when there are subsequent pages. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbositySpec; + nextPageToken?: string | null; } /** - * Spec for summarization verbosity instance. + * Request message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance { + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesRequest { /** - * Required. Text to be summarized. + * Required. The DeployedModel ID of the [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. */ - context?: string | null; + deployedModelId?: string | null; /** - * Optional. Summarization prompt for LLM. + * The latest timestamp of stats being generated. If not set, indicates feching stats till the latest possible one. */ - instruction?: string | null; + endTime?: string | null; /** - * Required. Output of the evaluated model. + * The feature display name. If specified, only return the stats belonging to this feature. Format: ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name, example: "user_destination". */ - prediction?: string | null; + featureDisplayName?: string | null; /** - * Optional. Ground truth used to compare against the prediction. + * Required. Objectives of the stats to retrieve. */ - reference?: string | null; - } - /** - * Spec for summarization verbosity result. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityResult { + objectives?: Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective[]; /** - * Output only. Confidence for summarization verbosity score. + * The standard list page size. */ - confidence?: number | null; + pageSize?: number | null; /** - * Output only. Explanation for summarization verbosity score. + * A page token received from a previous JobService.SearchModelDeploymentMonitoringStatsAnomalies call. */ - explanation?: string | null; + pageToken?: string | null; /** - * Output only. Summarization Verbosity score. + * The earliest timestamp of stats being generated. If not set, indicates fetching stats till the earliest possible one. 
*/ - score?: number | null; + startTime?: string | null; } /** - * Spec for summarization verbosity score metric. + * Stats requested for specific objective. */ - export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbositySpec { - /** - * Optional. Whether to use instance.reference to compute summarization verbosity. - */ - useReference?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective { /** - * Optional. Which version to use for evaluation. + * If set, all attribution scores between SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time and SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time are fetched, and page token doesn't take effect in this case. Only used to retrieve attribution score for the top Features which has the highest attribution score in the latest monitoring run. */ - version?: number | null; + topFeatureCount?: number | null; + type?: string | null; } /** - * Hyperparameters for SFT. + * Response message for JobService.SearchModelDeploymentMonitoringStatsAnomalies. */ - export interface Schema$GoogleCloudAiplatformV1beta1SupervisedHyperParameters { - /** - * Optional. Adapter size for tuning. - */ - adapterSize?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelDeploymentMonitoringStatsAnomaliesResponse { /** - * Optional. Number of complete passes the model makes over the entire training dataset during training. + * Stats retrieved for requested objectives. There are at most 1000 ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats in the response. */ - epochCount?: string | null; + monitoringStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStatsAnomalies[]; /** - * Optional. Multiplier for adjusting the default learning rate. + * The page token that can be used by the next JobService.SearchModelDeploymentMonitoringStatsAnomalies call. 
*/ - learningRateMultiplier?: number | null; + nextPageToken?: string | null; } /** - * Dataset distribution for Supervised Tuning. + * Request message for ModelMonitoringService.SearchModelMonitoringAlerts. */ - export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution { - /** - * Output only. Defines the histogram bucket. - */ - buckets?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistributionDatasetBucket[]; - /** - * Output only. The maximum of the population values. - */ - max?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringAlertsRequest { /** - * Output only. The arithmetic mean of the values in the population. + * If non-empty, returns the alerts in this time interval. */ - mean?: number | null; + alertTimeInterval?: Schema$GoogleTypeInterval; /** - * Output only. The median of the values in the population. + * If non-empty, returns the alerts of this model monitoring job. */ - median?: number | null; + modelMonitoringJob?: string | null; /** - * Output only. The minimum of the population values. + * If non-empty, returns the alerts of this objective type. Supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` */ - min?: number | null; + objectiveType?: string | null; /** - * Output only. The 5th percentile of the values in the population. + * The standard list page size. */ - p5?: number | null; + pageSize?: number | null; /** - * Output only. The 95th percentile of the values in the population. + * A page token received from a previous ModelMonitoringService.SearchModelMonitoringAlerts call. */ - p95?: number | null; + pageToken?: string | null; /** - * Output only. Sum of a given population of values. + * If non-empty, returns the alerts of this stats_name. */ - sum?: string | null; + statsName?: string | null; } /** - * Dataset bucket used to create a histogram for the distribution given a population of values. 
+ * Response message for ModelMonitoringService.SearchModelMonitoringAlerts. */ - export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistributionDatasetBucket { + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringAlertsResponse { /** - * Output only. Number of values in the bucket. + * Alerts retrieved for the requested objectives. Sorted by alert time descendingly. */ - count?: number | null; + modelMonitoringAlerts?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringAlert[]; /** - * Output only. Left bound of the bucket. + * The page token that can be used by the next ModelMonitoringService.SearchModelMonitoringAlerts call. */ - left?: number | null; + nextPageToken?: string | null; /** - * Output only. Right bound of the bucket. + * The total number of alerts retrieved by the requested objectives. */ - right?: number | null; + totalNumberAlerts?: string | null; } /** - * Tuning data statistics for Supervised Tuning. + * Filter for searching ModelMonitoringStats. */ - export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDataStats { - /** - * Output only. Number of billable characters in the tuning dataset. - */ - totalBillableCharacterCount?: string | null; - /** - * Output only. Number of tuning characters in the tuning dataset. - */ - totalTuningCharacterCount?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilter { /** - * Output only. Number of examples in the tuning dataset. + * Tabular statistics filter. */ - tuningDatasetExampleCount?: string | null; + tabularStatsFilter?: Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilterTabularStatsFilter; + } + /** + * Tabular statistics filter. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilterTabularStatsFilter { /** - * Output only. Number of tuning steps for this Tuning Job. 
+ * Specify the algorithm type used for distance calculation, eg: jensen_shannon_divergence, l_infinity. */ - tuningStepCount?: string | null; + algorithm?: string | null; /** - * Output only. Sample user messages in the training dataset uri. + * From a particular monitoring job. */ - userDatasetExamples?: Schema$GoogleCloudAiplatformV1beta1Content[]; + modelMonitoringJob?: string | null; /** - * Output only. Dataset distributions for the user input tokens. + * From a particular monitoring schedule. */ - userInputTokenDistribution?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution; + modelMonitoringSchedule?: string | null; /** - * Output only. Dataset distributions for the messages per example. + * One of the supported monitoring objectives: `raw-feature-drift` `prediction-output-drift` `feature-attribution` */ - userMessagePerExampleDistribution?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution; + objectiveType?: string | null; /** - * Output only. Dataset distributions for the user output tokens. + * If not specified, will return all the stats_names. */ - userOutputTokenDistribution?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution; + statsName?: string | null; } /** - * Tuning Spec for Supervised Tuning. + * Request message for ModelMonitoringService.SearchModelMonitoringStats. */ - export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningSpec { + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsRequest { /** - * Optional. Hyperparameters for SFT. + * The standard list page size. */ - hyperParameters?: Schema$GoogleCloudAiplatformV1beta1SupervisedHyperParameters; + pageSize?: number | null; /** - * Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. + * A page token received from a previous ModelMonitoringService.SearchModelMonitoringStats call. 
*/ - trainingDatasetUri?: string | null; + pageToken?: string | null; /** - * Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. + * Filter for search different stats. */ - validationDatasetUri?: string | null; + statsFilter?: Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsFilter; + /** + * The time interval for which results should be returned. + */ + timeInterval?: Schema$GoogleTypeInterval; } /** - * Request message for FeatureOnlineStoreAdminService.SyncFeatureView. - */ - export interface Schema$GoogleCloudAiplatformV1beta1SyncFeatureViewRequest {} - /** - * Respose message for FeatureOnlineStoreAdminService.SyncFeatureView. + * Response message for ModelMonitoringService.SearchModelMonitoringStats. */ - export interface Schema$GoogleCloudAiplatformV1beta1SyncFeatureViewResponse { + export interface Schema$GoogleCloudAiplatformV1beta1SearchModelMonitoringStatsResponse { /** - * Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}/featureViewSyncs/{feature_view_sync\}` + * Stats retrieved for requested objectives. */ - featureViewSync?: string | null; + monitoringStats?: Schema$GoogleCloudAiplatformV1beta1ModelMonitoringStats[]; + /** + * The page token that can be used by the next ModelMonitoringService.SearchModelMonitoringStats call. + */ + nextPageToken?: string | null; } /** - * A tensor value type. + * The request message for FeatureOnlineStoreService.SearchNearestEntities. */ - export interface Schema$GoogleCloudAiplatformV1beta1Tensor { + export interface Schema$GoogleCloudAiplatformV1beta1SearchNearestEntitiesRequest { /** - * Type specific representations that make it easy to create tensor protos in all languages. Only the representation corresponding to "dtype" can be set. The values hold the flattened representation of the tensor in row major order. BOOL + * Required. The query. 
*/ - boolVal?: boolean[] | null; + query?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborQuery; /** - * STRING + * Optional. If set to true, the full entities (including all vector values and metadata) of the nearest neighbors are returned; otherwise only entity id of the nearest neighbors will be returned. Note that returning full entities will significantly increase the latency and cost of the query. */ - bytesVal?: string[] | null; + returnFullEntity?: boolean | null; + } + /** + * Response message for FeatureOnlineStoreService.SearchNearestEntities + */ + export interface Schema$GoogleCloudAiplatformV1beta1SearchNearestEntitiesResponse { /** - * DOUBLE + * The nearest neighbors of the query entity. */ - doubleVal?: number[] | null; + nearestNeighbors?: Schema$GoogleCloudAiplatformV1beta1NearestNeighbors; + } + /** + * Configuration for the use of custom service account to run the workloads. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ServiceAccountSpec { /** - * The data type of tensor. + * Required. If true, custom user-managed service account is enforced to run any workloads (for example, Vertex Jobs) on the resource. Otherwise, uses the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). */ - dtype?: string | null; + enableCustomServiceAccount?: boolean | null; /** - * FLOAT + * Optional. Required when all below conditions are met * `enable_custom_service_account` is true; * any runtime is specified via `ResourceRuntimeSpec` on creation time, for example, Ray The users must have `iam.serviceAccounts.actAs` permission on this service account and then the specified runtime containers will run as it. Do not set this field if you want to submit jobs using custom service account to this PersistentResource after creation, but only specify the `service_account` inside the job. 
*/ - floatVal?: number[] | null; + serviceAccount?: string | null; + } + /** + * A set of Shielded Instance options. See [Images using supported Shielded VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). + */ + export interface Schema$GoogleCloudAiplatformV1beta1ShieldedVmConfig { /** - * INT64 + * Defines whether the instance has [Secure Boot](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot) enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. */ - int64Val?: string[] | null; + enableSecureBoot?: boolean | null; + } + /** + * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + */ + export interface Schema$GoogleCloudAiplatformV1beta1SmoothGradConfig { /** - * INT_8 INT_16 INT_32 + * This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features. */ - intVal?: number[] | null; + featureNoiseSigma?: Schema$GoogleCloudAiplatformV1beta1FeatureNoiseSigma; /** - * A list of tensor values. + * This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). 
For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature. */ - listVal?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; + noiseSigma?: number | null; /** - * Shape of the tensor. + * The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3. */ - shape?: string[] | null; + noisySampleCount?: number | null; + } + /** + * SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist managers and workers. Managers are responsible for managing the workers in this pool as well as customers' data labeling jobs associated with this pool. Customers create specialist pool as well as start data labeling jobs on Cloud, managers and workers handle the jobs using CrowdCompute console. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SpecialistPool { /** - * STRING + * Required. The user-defined name of the SpecialistPool. The name can be up to 128 characters long and can consist of any UTF-8 characters. This field should be unique on project-level. + */ + displayName?: string | null; + /** + * Required. The resource name of the SpecialistPool. */ - stringVal?: string[] | null; + name?: string | null; /** - * A map of string to tensor. + * Output only. The resource name of the pending data labeling jobs. */ - structVal?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1Tensor; - } | null; + pendingDataLabelingJobs?: string[] | null; /** - * Serialized raw tensor content. + * The email addresses of the managers in the SpecialistPool. 
*/ - tensorVal?: string | null; + specialistManagerEmails?: string[] | null; /** - * UINT64 + * Output only. The number of managers in this SpecialistPool. */ - uint64Val?: string[] | null; + specialistManagersCount?: number | null; /** - * UINT8 UINT16 UINT32 + * The email addresses of workers in the SpecialistPool. */ - uintVal?: number[] | null; + specialistWorkerEmails?: string[] | null; } /** - * Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a Google Cloud project. If needed users can also create extra Tensorboards in their projects. + * Metadata information for NotebookService.StartNotebookRuntime. */ - export interface Schema$GoogleCloudAiplatformV1beta1Tensorboard { + export interface Schema$GoogleCloudAiplatformV1beta1StartNotebookRuntimeOperationMetadata { /** - * Output only. Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. Does not end with a '/'. + * The operation generic information. */ - blobStoragePathPrefix?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Output only. Timestamp when this Tensorboard was created. + * A human-readable message that shows the intermediate progress details of NotebookRuntime. */ - createTime?: string | null; + progressMessage?: string | null; + } + /** + * Request message for NotebookService.StartNotebookRuntime. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StartNotebookRuntimeRequest {} + /** + * Request message for VizierService.StopTrial. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StopTrialRequest {} + /** + * Assigns input data to the training, validation, and test sets so that the distribution of values found in the categorical column (as specified by the `key` field) is mirrored within each split. The fraction values determine the relative sizes of the splits. 
For example, if the specified column has three values, with 50% of the rows having value "A", 25% value "B", and 25% value "C", and the split fractions are specified as 80/10/10, then the training set will constitute 80% of the training data, with about 50% of the training set rows having the value "A" for the specified column, about 25% having the value "B", and about 25% having the value "C". Only the top 500 occurring values are used; any values not in the top 500 values are randomly assigned to a split. If less than three rows contain a specific value, those rows are randomly assigned. Supported only for tabular Datasets. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StratifiedSplit { /** - * Description of this Tensorboard. + * Required. The key is a name of one of the Dataset's data columns. The key provided must be for a categorical column. */ - description?: string | null; + key?: string | null; /** - * Required. User provided name of this Tensorboard. + * The fraction of the input data that is to be used to evaluate the Model. */ - displayName?: string | null; + testFraction?: number | null; /** - * Customer-managed encryption key spec for a Tensorboard. If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key. + * The fraction of the input data that is to be used to train the Model. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + trainingFraction?: number | null; /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The fraction of the input data that is to be used to validate the Model. */ - etag?: string | null; + validationFraction?: number | null; + } + /** + * Request message for FeatureOnlineStoreService.StreamingFetchFeatureValues. For the entities requested, all features under the requested feature view will be returned. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1StreamingFetchFeatureValuesRequest { /** - * Used to indicate if the TensorBoard instance is the default one. Each project & region can have at most one default TensorBoard instance. Creation of a default TensorBoard instance and updating an existing TensorBoard instance to be default will mark all other TensorBoard instances (if any) as non default. + * Specify response data format. If not set, KeyValue format will be used. */ - isDefault?: boolean | null; + dataFormat?: string | null; + dataKeys?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey[]; + } + /** + * Response message for FeatureOnlineStoreService.StreamingFetchFeatureValues. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StreamingFetchFeatureValuesResponse { + data?: Schema$GoogleCloudAiplatformV1beta1FetchFeatureValuesResponse[]; + dataKeysWithError?: Schema$GoogleCloudAiplatformV1beta1FeatureViewDataKey[]; /** - * The labels with user-defined metadata to organize your Tensorboards. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Tensorboard (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Response status. If OK, then StreamingFetchFeatureValuesResponse.data will be populated. Otherwise StreamingFetchFeatureValuesResponse.data_keys_with_error will be populated with the appropriate data keys. The error only applies to the listed data keys - the stream will remain open for further FeatureOnlineStoreService.StreamingFetchFeatureValuesRequest requests. 
*/ - labels?: {[key: string]: string} | null; + status?: Schema$GoogleRpcStatus; + } + /** + * Request message for PredictionService.StreamingPredict. The first message must contain endpoint field and optionally input. The subsequent messages must contain input. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StreamingPredictRequest { /** - * Output only. Name of the Tensorboard. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` + * The prediction input. */ - name?: string | null; + inputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; /** - * Output only. The number of Runs stored in this Tensorboard. + * The parameters that govern the prediction. */ - runCount?: number | null; + parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; + } + /** + * Response message for PredictionService.StreamingPredict. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StreamingPredictResponse { /** - * Output only. Timestamp when this Tensorboard was last updated. + * The prediction output. */ - updateTime?: string | null; + outputs?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; + /** + * The parameters that govern the prediction. + */ + parameters?: Schema$GoogleCloudAiplatformV1beta1Tensor; } /** - * One blob (e.g, image, graph) viewable on a blob metric plot. + * Request message for FeaturestoreOnlineServingService.StreamingFeatureValuesRead. */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardBlob { + export interface Schema$GoogleCloudAiplatformV1beta1StreamingReadFeatureValuesRequest { /** - * Optional. The bytes of the blob is not present unless it's returned by the ReadTensorboardBlobData endpoint. + * Required. IDs of entities to read Feature values of. The maximum number of IDs is 100. For example, for a machine learning model predicting user clicks on a website, an entity ID could be `user_123`. */ - data?: string | null; + entityIds?: string[] | null; /** - * Output only. 
A URI safe key uniquely identifying a blob. Can be used to locate the blob stored in the Cloud Storage bucket of the consumer project. + * Required. Selector choosing Features of the target EntityType. Feature IDs will be deduplicated. */ - id?: string | null; + featureSelector?: Schema$GoogleCloudAiplatformV1beta1FeatureSelector; } /** - * One point viewable on a blob metric plot, but mostly just a wrapper message to work around repeated fields can't be used directly within `oneof` fields. + * A list of string values. */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardBlobSequence { + export interface Schema$GoogleCloudAiplatformV1beta1StringArray { /** - * List of blobs contained within the sequence. + * A list of string values. */ - values?: Schema$GoogleCloudAiplatformV1beta1TensorboardBlob[]; + values?: string[] | null; } /** - * A TensorboardExperiment is a group of TensorboardRuns, that are typically the results of a training job run, in a Tensorboard. + * A message representing a Study. */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardExperiment { + export interface Schema$GoogleCloudAiplatformV1beta1Study { /** - * Output only. Timestamp when this TensorboardExperiment was created. + * Output only. Time at which the study was created. */ createTime?: string | null; /** - * Description of this TensorboardExperiment. - */ - description?: string | null; - /** - * User provided name of this TensorboardExperiment. + * Required. Describes the Study, default value is empty string. */ displayName?: string | null; /** - * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - */ - etag?: string | null; - /** - * The labels with user-defined metadata to organize your TensorboardExperiment. Label keys and values cannot be longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. 
International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with `aiplatform.googleapis.com/` and are immutable. The following system labels exist for each Dataset: * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value is the metadata_schema's title. + * Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. */ - labels?: {[key: string]: string} | null; + inactiveReason?: string | null; /** - * Output only. Name of the TensorboardExperiment. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` + * Output only. The name of a study. The study's globally unique identifier. Format: `projects/{project\}/locations/{location\}/studies/{study\}` */ name?: string | null; /** - * Immutable. Source of the TensorboardExperiment. Example: a custom training job. + * Output only. The detailed state of a Study. */ - source?: string | null; + state?: string | null; /** - * Output only. Timestamp when this TensorboardExperiment was last updated. + * Required. Configuration of the Study. */ - updateTime?: string | null; + studySpec?: Schema$GoogleCloudAiplatformV1beta1StudySpec; } /** - * TensorboardRun maps to a specific execution of a training job with a given set of hyperparameter values, model definition, dataset, etc + * Represents specification of a Study. */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardRun { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpec { /** - * Output only. Timestamp when this TensorboardRun was created. + * The search algorithm specified for the Study. */ - createTime?: string | null; + algorithm?: string | null; /** - * Description of this TensorboardRun. 
+ * The automated early stopping spec using convex stopping rule. */ - description?: string | null; + convexAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecConvexAutomatedStoppingSpec; /** - * Required. User provided name of this TensorboardRun. This value must be unique among all TensorboardRuns belonging to the same parent TensorboardExperiment. + * Deprecated. The automated early stopping using convex stopping rule. */ - displayName?: string | null; + convexStopConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecConvexStopConfig; /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * The automated early stopping spec using decay curve rule. */ - etag?: string | null; + decayCurveStoppingSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecDecayCurveAutomatedStoppingSpec; /** - * The labels with user-defined metadata to organize your TensorboardRuns. This field will be used to filter and visualize Runs in the Tensorboard UI. For example, a Vertex AI training job can set a label aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created within that job. An end user can set a label experiment_id=xxxxx for all the runs produced in a Jupyter notebook. These runs can be grouped by a label value and visualized together in the Tensorboard UI. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one TensorboardRun (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. + * Describe which measurement selection type will be used */ - labels?: {[key: string]: string} | null; + measurementSelectionType?: string | null; /** - * Output only. 
Name of the TensorboardRun. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` + * The automated early stopping spec using median rule. */ - name?: string | null; + medianAutomatedStoppingSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecMedianAutomatedStoppingSpec; /** - * Output only. Timestamp when this TensorboardRun was last updated. + * Required. Metric specs for the Study. */ - updateTime?: string | null; - } - /** - * One point viewable on a tensor metric plot. - */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardTensor { + metrics?: Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpec[]; /** - * Required. Serialized form of https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto + * The observation noise level of the study. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - value?: string | null; + observationNoise?: string | null; /** - * Optional. Version number of TensorProto used to serialize value. + * Required. The set of parameters to tune. */ - versionNumber?: number | null; - } - /** - * TensorboardTimeSeries maps to times series produced in training runs - */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries { + parameters?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpec[]; /** - * Output only. Timestamp when this TensorboardTimeSeries was created. + * Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. */ - createTime?: string | null; + studyStoppingConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig; /** - * Description of this TensorboardTimeSeries. + * The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob */ - description?: string | null; + transferLearningConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig; + } + /** + * Configuration for ConvexAutomatedStoppingSpec. When there are enough completed trials (configured by min_measurement_count), for pending trials with enough measurements and steps, the policy first computes an overestimate of the objective value at max_num_steps according to the slope of the incomplete objective value curve. No prediction can be made if the curve is completely flat. If the overestimation is worse than the best objective value of the completed trials, this pending trial will be early-stopped, but a last measurement will be added to the pending trial with max_num_steps and predicted objective value from the autoregression model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecConvexAutomatedStoppingSpec { /** - * Required. User provided name of this TensorboardTimeSeries. This value should be unique among all TensorboardTimeSeries resources belonging to the same TensorboardRun resource (parent resource). + * The hyper-parameter name used in the tuning job that stands for learning rate. Leave it blank if learning rate is not in a parameter in tuning. The learning_rate is used to estimate the objective value of the ongoing trial. */ - displayName?: string | null; + learningRateParameterName?: string | null; /** - * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + * Steps used in predicting the final objective for early stopped trials. In general, it's set to be the same as the defined steps in training / tuning. If not defined, it will learn it from the completed trials. When use_steps is false, this field is set to the maximum elapsed seconds. */ - etag?: string | null; + maxStepCount?: string | null; /** - * Output only. 
Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries. + * The minimal number of measurements in a Trial. Early-stopping checks will not trigger if less than min_measurement_count+1 completed trials or pending trials with less than min_measurement_count measurements. If not defined, the default value is 5. */ - metadata?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeriesMetadata; + minMeasurementCount?: string | null; /** - * Output only. Name of the TensorboardTimeSeries. + * Minimum number of steps for a trial to complete. Trials which do not have a measurement with step_count \> min_step_count won't be considered for early stopping. It's ok to set it to 0, and a trial can be early stopped at any stage. By default, min_step_count is set to be one-tenth of the max_step_count. When use_elapsed_duration is true, this field is set to the minimum elapsed seconds. */ - name?: string | null; + minStepCount?: string | null; /** - * Data of the current plugin, with the size limited to 65KB. + * ConvexAutomatedStoppingSpec by default only updates the trials that needs to be early stopped using a newly trained auto-regressive model. When this flag is set to True, all stopped trials from the beginning are potentially updated in terms of their `final_measurement`. Also, note that the training logic of autoregressive models is different in this case. Enabling this option has shown better results and this may be the default option in the future. */ - pluginData?: string | null; + updateAllStoppedTrials?: boolean | null; /** - * Immutable. Name of the plugin this time series pertain to. Such as Scalar, Tensor, Blob + * This bool determines whether or not the rule is applied based on elapsed_secs or steps. If use_elapsed_duration==false, the early stopping decision is made according to the predicted objective values according to the target steps. If use_elapsed_duration==true, elapsed_secs is used instead of steps. 
Also, in this case, the parameters max_num_steps and min_num_steps are overloaded to contain max_elapsed_seconds and min_elapsed_seconds. */ - pluginName?: string | null; + useElapsedDuration?: boolean | null; + } + /** + * Configuration for ConvexStopPolicy. + */ + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecConvexStopConfig { /** - * Output only. Timestamp when this TensorboardTimeSeries was last updated. + * The number of Trial measurements used in autoregressive model for value prediction. A trial won't be considered early stopping if has fewer measurement points. */ - updateTime?: string | null; + autoregressiveOrder?: string | null; /** - * Required. Immutable. Type of TensorboardTimeSeries value. + * The hyper-parameter name used in the tuning job that stands for learning rate. Leave it blank if learning rate is not in a parameter in tuning. The learning_rate is used to estimate the objective value of the ongoing trial. */ - valueType?: string | null; - } - /** - * Describes metadata for a TensorboardTimeSeries. - */ - export interface Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeriesMetadata { + learningRateParameterName?: string | null; /** - * Output only. The largest blob sequence length (number of blobs) of all data points in this time series, if its ValueType is BLOB_SEQUENCE. + * Steps used in predicting the final objective for early stopped trials. In general, it's set to be the same as the defined steps in training / tuning. When use_steps is false, this field is set to the maximum elapsed seconds. */ - maxBlobSequenceLength?: string | null; + maxNumSteps?: string | null; /** - * Output only. Max step index of all data points within a TensorboardTimeSeries. + * Minimum number of steps for a trial to complete. Trials which do not have a measurement with num_steps \> min_num_steps won't be considered for early stopping. It's ok to set it to 0, and a trial can be early stopped at any stage. 
By default, min_num_steps is set to be one-tenth of the max_num_steps. When use_steps is false, this field is set to the minimum elapsed seconds. */ - maxStep?: string | null; + minNumSteps?: string | null; /** - * Output only. Max wall clock timestamp of all data points within a TensorboardTimeSeries. + * This bool determines whether or not the rule is applied based on elapsed_secs or steps. If use_seconds==false, the early stopping decision is made according to the predicted objective values according to the target steps. If use_seconds==true, elapsed_secs is used instead of steps. Also, in this case, the parameters max_num_steps and min_num_steps are overloaded to contain max_elapsed_seconds and min_elapsed_seconds. */ - maxWallTime?: string | null; + useSeconds?: boolean | null; } /** - * The storage details for TFRecord output content. + * The decay curve automated stopping rule builds a Gaussian Process Regressor to predict the final objective value of a Trial based on the already completed Trials and the intermediate measurements of the current Trial. Early stopping is requested for the current Trial if there is very low probability to exceed the optimal value found so far. */ - export interface Schema$GoogleCloudAiplatformV1beta1TFRecordDestination { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecDecayCurveAutomatedStoppingSpec { /** - * Required. Google Cloud Storage location. + * True if Measurement.elapsed_duration is used as the x-axis of each Trials Decay Curve. Otherwise, Measurement.step_count will be used as the x-axis. */ - gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; + useElapsedDuration?: boolean | null; } /** - * The config for feature monitoring threshold. + * The median automated stopping rule stops a pending Trial if the Trial's best objective_value is strictly below the median 'performance' of all completed Trials reported up to the Trial's last measurement. 
Currently, 'performance' refers to the running average of the objective values reported by the Trial in each measurement. */ - export interface Schema$GoogleCloudAiplatformV1beta1ThresholdConfig { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecMedianAutomatedStoppingSpec { /** - * Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. + * True if median automated stopping rule applies on Measurement.elapsed_duration. It means that elapsed_duration field of latest measurement of current Trial is used to compute median objective value for each completed Trials. */ - value?: number | null; + useElapsedDuration?: boolean | null; } /** - * All the data stored in a TensorboardTimeSeries. + * Represents a metric to optimize. */ - export interface Schema$GoogleCloudAiplatformV1beta1TimeSeriesData { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpec { /** - * Required. The ID of the TensorboardTimeSeries, which will become the final component of the TensorboardTimeSeries' resource name + * Required. The optimization goal of the metric. */ - tensorboardTimeSeriesId?: string | null; + goal?: string | null; /** - * Required. Data points in this time series. + * Required. The ID of the metric. Must not contain whitespaces and must be unique amongst all MetricSpecs. */ - values?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesDataPoint[]; + metricId?: string | null; /** - * Required. Immutable. The value type of this time series. All the values in this time series data must match this value type. + * Used for safe search. In the case, the metric will be a safety metric. 
You must provide a separate metric for objective metric. */ - valueType?: string | null; + safetyConfig?: Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpecSafetyMetricConfig; } /** - * A TensorboardTimeSeries data point. + * Used in safe optimization to specify threshold levels and risk tolerance. */ - export interface Schema$GoogleCloudAiplatformV1beta1TimeSeriesDataPoint { - /** - * A blob sequence value. - */ - blobs?: Schema$GoogleCloudAiplatformV1beta1TensorboardBlobSequence; - /** - * A scalar value. - */ - scalar?: Schema$GoogleCloudAiplatformV1beta1Scalar; - /** - * Step index of this data point within the run. - */ - step?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecMetricSpecSafetyMetricConfig { /** - * A tensor value. + * Desired minimum fraction of safe trials (over total number of trials) that should be targeted by the algorithm at any time during the study (best effort). This should be between 0.0 and 1.0 and a value of 0.0 means that there is no minimum and an algorithm proceeds without targeting any specific fraction. A value of 1.0 means that the algorithm attempts to only Suggest safe Trials. */ - tensor?: Schema$GoogleCloudAiplatformV1beta1TensorboardTensor; + desiredMinSafeTrialsFraction?: number | null; /** - * Wall clock timestamp when this data point is generated by the end user. + * Safety threshold (boundary value between safe and unsafe). NOTE that if you leave SafetyMetricConfig unset, a default value of 0 will be used. */ - wallTime?: string | null; + safetyThreshold?: number | null; } /** - * Assigns input data to training, validation, and test sets based on a provided timestamps. The youngest data pieces are assigned to training set, next to validation set, and the oldest to the test set. Supported only for tabular Datasets. + * Represents a single parameter to optimize. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1TimestampSplit { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpec { /** - * Required. The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. + * The value spec for a 'CATEGORICAL' parameter. */ - key?: string | null; + categoricalValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecCategoricalValueSpec; /** - * The fraction of the input data that is to be used to evaluate the Model. + * A conditional parameter node is active if the parameter's value matches the conditional node's parent_value_condition. If two items in conditional_parameter_specs have the same name, they must have disjoint parent_value_condition. */ - testFraction?: number | null; + conditionalParameterSpecs?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpec[]; /** - * The fraction of the input data that is to be used to train the Model. + * The value spec for a 'DISCRETE' parameter. */ - trainingFraction?: number | null; + discreteValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDiscreteValueSpec; /** - * The fraction of the input data that is to be used to validate the Model. + * The value spec for a 'DOUBLE' parameter. */ - validationFraction?: number | null; - } - /** - * Tokens info with a list of tokens and the corresponding list of token ids. - */ - export interface Schema$GoogleCloudAiplatformV1beta1TokensInfo { + doubleValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDoubleValueSpec; /** - * A list of token ids from the input. + * The value spec for an 'INTEGER' parameter. 
*/ - tokenIds?: string[] | null; + integerValueSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecIntegerValueSpec; /** - * A list of tokens from the input. + * Required. The ID of the parameter. Must not contain whitespaces and must be unique amongst all ParameterSpecs. */ - tokens?: string[] | null; + parameterId?: string | null; + /** + * How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. + */ + scaleType?: string | null; } /** - * Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). + * Value specification for a parameter in `CATEGORICAL` type. */ - export interface Schema$GoogleCloudAiplatformV1beta1Tool { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecCategoricalValueSpec { /** - * Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. + * A default value for a `CATEGORICAL` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - functionDeclarations?: Schema$GoogleCloudAiplatformV1beta1FunctionDeclaration[]; + defaultValue?: string | null; /** - * Optional. Retrieval tool type. 
System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. + * Required. The list of possible categories. */ - retrieval?: Schema$GoogleCloudAiplatformV1beta1Retrieval; + values?: string[] | null; } /** - * Input for tool call valid metric. + * Represents a parameter spec with condition from its parent parameter. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidInput { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpec { /** - * Required. Repeated tool call valid instances. + * Required. The spec for a conditional parameter. */ - instances?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidInstance[]; + parameterSpec?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpec; /** - * Required. Spec for tool call valid metric. + * The spec for matching values from a parent parameter of `CATEGORICAL` type. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidSpec; - } - /** - * Spec for tool call valid instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidInstance { + parentCategoricalValues?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition; /** - * Required. Output of the evaluated model. + * The spec for matching values from a parent parameter of `DISCRETE` type. */ - prediction?: string | null; + parentDiscreteValues?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition; /** - * Required. Ground truth used to compare against the prediction. + * The spec for matching values from a parent parameter of `INTEGER` type. */ - reference?: string | null; + parentIntValues?: Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecIntValueCondition; } /** - * Tool call valid metric value for an instance. 
+ * Represents the spec to match categorical values from parent parameter. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidMetricValue { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition { /** - * Output only. Tool call valid score. + * Required. Matches values of the parent parameter of 'CATEGORICAL' type. All values must exist in `categorical_value_spec` of parent parameter. */ - score?: number | null; + values?: string[] | null; } /** - * Results for tool call valid metric. + * Represents the spec to match discrete values from parent parameter. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidResults { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition { /** - * Output only. Tool call valid metric values. + * Required. Matches values of the parent parameter of 'DISCRETE' type. All values must exist in `discrete_value_spec` of parent parameter. The Epsilon of the value matching is 1e-10. */ - toolCallValidMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidMetricValue[]; + values?: number[] | null; } /** - * Spec for tool call valid metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidSpec {} - /** - * Tool config. This config is shared for all tools provided in the request. + * Represents the spec to match integer values from parent parameter. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolConfig { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { /** - * Optional. Function calling config. + * Required. Matches values of the parent parameter of 'INTEGER' type. All values must lie in `integer_value_spec` of parent parameter. 
*/ - functionCallingConfig?: Schema$GoogleCloudAiplatformV1beta1FunctionCallingConfig; + values?: string[] | null; } /** - * Input for tool name match metric. + * Value specification for a parameter in `DISCRETE` type. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInput { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDiscreteValueSpec { /** - * Required. Repeated tool name match instances. + * A default value for a `DISCRETE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. It automatically rounds to the nearest feasible discrete point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - instances?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInstance[]; + defaultValue?: number | null; /** - * Required. Spec for tool name match metric. + * Required. A list of possible values. The list should be in increasing order and at least 1e-10 apart. For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. This list should not contain more than 1,000 values. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchSpec; + values?: number[] | null; } /** - * Spec for tool name match instance. + * Value specification for a parameter in `DOUBLE` type. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInstance { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecDoubleValueSpec { /** - * Required. Output of the evaluated model. + * A default value for a `DOUBLE` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - prediction?: string | null; + defaultValue?: number | null; /** - * Required. 
Ground truth used to compare against the prediction. + * Required. Inclusive maximum value of the parameter. */ - reference?: string | null; - } - /** - * Tool name match metric value for an instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue { + maxValue?: number | null; /** - * Output only. Tool name match score. + * Required. Inclusive minimum value of the parameter. */ - score?: number | null; + minValue?: number | null; } /** - * Results for tool name match metric. + * Value specification for a parameter in `INTEGER` type. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchResults { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecParameterSpecIntegerValueSpec { /** - * Output only. Tool name match metric values. + * A default value for an `INTEGER` parameter that is assumed to be a relatively good starting point. Unset value signals that there is no offered starting point. Currently only supported by the Vertex AI Vizier service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ - toolNameMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue[]; - } - /** - * Spec for tool name match metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchSpec {} - /** - * Input for tool parameter key match metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput { + defaultValue?: string | null; /** - * Required. Repeated tool parameter key match instances. + * Required. Inclusive maximum value of the parameter. */ - instances?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance[]; + maxValue?: string | null; /** - * Required. Spec for tool parameter key match metric. + * Required. Inclusive minimum value of the parameter. */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec; + minValue?: string | null; } /** - * Spec for tool parameter key match instance. 
+ * The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig { /** - * Required. Output of the evaluated model. + * If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. */ - prediction?: string | null; + maxDurationNoProgress?: string | null; /** - * Required. Ground truth used to compare against the prediction. + * If the specified time or duration has passed, stop the study. */ - reference?: string | null; - } - /** - * Tool parameter key match metric value for an instance. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue { + maximumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1beta1StudyTimeConstraint; + /** + * If there are more than this many trials, stop the study. + */ + maxNumTrials?: number | null; /** - * Output only. Tool parameter key match score. + * If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. */ - score?: number | null; - } - /** - * Results for tool parameter key match metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults { + maxNumTrialsNoProgress?: number | null; /** - * Output only. Tool parameter key match metric values. + * Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. 
For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. */ - toolParameterKeyMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue[]; - } - /** - * Spec for tool parameter key match metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec {} - /** - * Input for tool parameter key value match metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput { + minimumRuntimeConstraint?: Schema$GoogleCloudAiplatformV1beta1StudyTimeConstraint; /** - * Required. Repeated tool parameter key value match instances. + * If there are fewer than this many COMPLETED trials, do not stop the study. */ - instances?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance[]; + minNumTrials?: number | null; /** - * Required. Spec for tool parameter key value match metric. + * If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. 
(Please see Study.State documentation for the source of truth). */ - metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec; + shouldStopAsap?: boolean | null; } /** - * Spec for tool parameter key value match instance. + * This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance { + export interface Schema$GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig { /** - * Required. Output of the evaluated model. + * Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. */ - prediction?: string | null; + disableTransferLearning?: boolean | null; /** - * Required. Ground truth used to compare against the prediction. + * Output only. Names of previously completed studies */ - reference?: string | null; + priorStudyNames?: string[] | null; } /** - * Tool parameter key value match metric value for an instance. + * Time-based Constraint for Study */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue { + export interface Schema$GoogleCloudAiplatformV1beta1StudyTimeConstraint { /** - * Output only. Tool parameter key value match score. + * Compares the wallclock time to this time. Must use UTC timezone. */ - score?: number | null; - } - /** - * Results for tool parameter key value match metric. - */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults { + endTime?: string | null; /** - * Output only. Tool parameter key value match metric values. + * Counts the wallclock time passed since the creation of this Study. 
*/ - toolParameterKvMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue[]; + maxDuration?: string | null; } /** - * Spec for tool parameter key value match metric. + * Details of operations that perform Trials suggestion. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec { + export interface Schema$GoogleCloudAiplatformV1beta1SuggestTrialsMetadata { /** - * Optional. Whether to use STRCIT string match on parameter values. + * The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. */ - useStrictStringMatch?: boolean | null; + clientId?: string | null; + /** + * Operation metadata for suggesting Trials. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * A single example of the tool usage. + * Request message for VizierService.SuggestTrials. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolUseExample { + export interface Schema$GoogleCloudAiplatformV1beta1SuggestTrialsRequest { /** - * Required. The display name for example. + * Required. The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested Trial if the Trial is pending, and provide a new Trial if the last suggested Trial was completed. */ - displayName?: string | null; + clientId?: string | null; /** - * Extension operation to call. + * Optional. This allows you to specify the "context" for a Trial; a context is a slice (a subspace) of the search space. Typical uses for contexts: 1) You are using Vizier to tune a server for best performance, but there's a strong weekly cycle. The context specifies the day-of-week. 
This allows Tuesday to generalize from Wednesday without assuming that everything is identical. 2) Imagine you're optimizing some medical treatment for people. As they walk in the door, you know certain facts about them (e.g. sex, weight, height, blood-pressure). Put that information in the context, and Vizier will adapt its suggestions to the patient. 3) You want to do a fair A/B test efficiently. Specify the "A" and "B" conditions as contexts, and Vizier will generalize between "A" and "B" conditions. If they are similar, this will allow Vizier to converge to the optimum faster than if "A" and "B" were separate Studies. NOTE: You can also enter contexts as REQUESTED Trials, e.g. via the CreateTrial() RPC; that's the asynchronous option where you don't need a close association between contexts and suggestions. NOTE: All the Parameters you set in a context MUST be defined in the Study. NOTE: You must supply 0 or $suggestion_count contexts. If you don't supply any contexts, Vizier will make suggestions from the full search space specified in the StudySpec; if you supply a full set of context, each suggestion will match the corresponding context. NOTE: A Context with no features set matches anything, and allows suggestions from the full search space. NOTE: Contexts MUST lie within the search space specified in the StudySpec. It's an error if they don't. NOTE: Contexts preferentially match ACTIVE then REQUESTED trials before new suggestions are generated. NOTE: Generation of suggestions involves a match between a Context and (optionally) a REQUESTED trial; if that match is not fully specified, a suggestion will be geneated in the merged subspace. */ - extensionOperation?: Schema$GoogleCloudAiplatformV1beta1ToolUseExampleExtensionOperation; + contexts?: Schema$GoogleCloudAiplatformV1beta1TrialContext[]; /** - * Function name to call. + * Required. The number of suggestions requested. It must be positive. 
*/ - functionName?: string | null; + suggestionCount?: number | null; + } + /** + * Response message for VizierService.SuggestTrials. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SuggestTrialsResponse { /** - * Required. Query that should be routed to this tool. + * The time at which operation processing completed. */ - query?: string | null; + endTime?: string | null; /** - * Request parameters used for executing this tool. + * The time at which the operation was started. */ - requestParams?: {[key: string]: any} | null; + startTime?: string | null; /** - * Response parameters generated by this tool. + * The state of the Study. */ - responseParams?: {[key: string]: any} | null; + studyState?: string | null; /** - * Summary of the tool response to the user query. + * A list of Trials. */ - responseSummary?: string | null; + trials?: Schema$GoogleCloudAiplatformV1beta1Trial[]; } /** - * Identifies one operation of the extension. + * Input for summarization helpfulness metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1ToolUseExampleExtensionOperation { + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput { /** - * Resource name of the extension. + * Required. Summarization helpfulness instance. */ - extension?: string | null; + instance?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance; /** - * Required. Operation ID of the extension. + * Required. Spec for summarization helpfulness score metric. */ - operationId?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec; } /** - * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. + * Spec for summarization helpfulness instance. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1TrainingConfig { + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance { /** - * The timeout hours for the CMLE training job, expressed in milli hours i.e. 1,000 value in this field means 1 hour. + * Required. Text to be summarized. */ - timeoutTrainingMilliHours?: string | null; - } - /** - * The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, upload the Model to Vertex AI, and evaluate the Model. - */ - export interface Schema$GoogleCloudAiplatformV1beta1TrainingPipeline { + context?: string | null; /** - * Output only. Time when the TrainingPipeline was created. + * Optional. Summarization prompt for LLM. */ - createTime?: string | null; + instruction?: string | null; /** - * Required. The user-defined name of this TrainingPipeline. + * Required. Output of the evaluated model. */ - displayName?: string | null; + prediction?: string | null; /** - * Customer-managed encryption key spec for a TrainingPipeline. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if model_to_upload is not set separately. + * Optional. Ground truth used to compare against the prediction. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + reference?: string | null; + } + /** + * Spec for summarization helpfulness result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult { /** - * Output only. Time when the TrainingPipeline entered any of the following states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, `PIPELINE_STATE_CANCELLED`. + * Output only. Confidence for summarization helpfulness score. */ - endTime?: string | null; + confidence?: number | null; /** - * Output only. 
Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or `PIPELINE_STATE_CANCELLED`. + * Output only. Explanation for summarization helpfulness score. */ - error?: Schema$GoogleRpcStatus; + explanation?: string | null; /** - * Specifies Vertex AI owned input data that may be used for training the Model. The TrainingPipeline's training_task_definition should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the training_task_definition, then it should be assumed that the TrainingPipeline does not depend on this configuration. + * Output only. Summarization Helpfulness score. */ - inputDataConfig?: Schema$GoogleCloudAiplatformV1beta1InputDataConfig; + score?: number | null; + } + /** + * Spec for summarization helpfulness score metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec { /** - * The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Optional. Whether to use instance.reference to compute summarization helpfulness. */ - labels?: {[key: string]: string} | null; + useReference?: boolean | null; /** - * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + * Optional. Which version to use for evaluation. */ - modelId?: string | null; + version?: number | null; + } + /** + * Input for summarization quality metric. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInput { /** - * Describes the Model that may be uploaded (via ModelService.UploadModel) by this TrainingPipeline. The TrainingPipeline's training_task_definition should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the training_task_definition, then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task does not support uploading a Model as part of the pipeline. When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and the trained Model had been uploaded into Vertex AI, then the model_to_upload's resource name is populated. The Model is always uploaded into the Project and Location in which this pipeline is. + * Required. Summarization quality instance. */ - modelToUpload?: Schema$GoogleCloudAiplatformV1beta1Model; + instance?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInstance; /** - * Output only. Resource name of the TrainingPipeline. + * Required. Spec for summarization quality score metric. */ - name?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1SummarizationQualitySpec; + } + /** + * Spec for summarization quality instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualityInstance { /** - * Optional. When specify this field, the `model_to_upload` will not be uploaded as a new model, instead, it will become a new version of this `parent_model`. + * Required. Text to be summarized. */ - parentModel?: string | null; + context?: string | null; /** - * Output only. Time when the TrainingPipeline for the first time entered the `PIPELINE_STATE_RUNNING` state. + * Required. Summarization prompt for LLM. */ - startTime?: string | null; + instruction?: string | null; /** - * Output only. 
The detailed state of the pipeline. + * Required. Output of the evaluated model. */ - state?: string | null; + prediction?: string | null; /** - * Required. A Google Cloud Storage path to the YAML file that defines the training task which is responsible for producing the model artifact, and may also include additional auxiliary work. The definition files that can be used here are found in gs://google-cloud-aiplatform/schema/trainingjob/definition/. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + * Optional. Ground truth used to compare against the prediction. */ - trainingTaskDefinition?: string | null; + reference?: string | null; + } + /** + * Spec for summarization quality result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualityResult { /** - * Required. The training task's parameter(s), as specified in the training_task_definition's `inputs`. + * Output only. Confidence for summarization quality score. */ - trainingTaskInputs?: any | null; + confidence?: number | null; /** - * Output only. The metadata information as specified in the training_task_definition's `metadata`. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's training_task_definition contains `metadata` object. + * Output only. Explanation for summarization quality score. */ - trainingTaskMetadata?: any | null; + explanation?: string | null; /** - * Output only. Time when the TrainingPipeline was most recently updated. + * Output only. Summarization Quality score. */ - updateTime?: string | null; + score?: number | null; } /** - * A message representing a Trial. 
A Trial contains a unique set of Parameters that has been or will be evaluated, along with the objective metrics got by running the Trial. + * Spec for summarization quality score metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1Trial { - /** - * Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique client_id. When a client asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client should evaluate the Trial, complete it, and report back to Vertex AI Vizier. If suggestion is asked again by same client_id before the Trial is completed, the same Trial will be returned. Multiple clients with different client_ids can ask for suggestions simultaneously, each of them will get their own Trial. - */ - clientId?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationQualitySpec { /** - * Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's Trial. + * Optional. Whether to use instance.reference to compute summarization quality. */ - customJob?: string | null; + useReference?: boolean | null; /** - * Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`. + * Optional. Which version to use for evaluation. */ - endTime?: string | null; + version?: number | null; + } + /** + * Input for summarization verbosity metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInput { /** - * Output only. The final measurement containing the objective value. + * Required. Summarization verbosity instance. */ - finalMeasurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; + instance?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance; /** - * Output only. The identifier of the Trial assigned by the service. + * Required. Spec for summarization verbosity score metric. 
*/ - id?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1SummarizationVerbositySpec; + } + /** + * Spec for summarization verbosity instance. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance { /** - * Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is `INFEASIBLE`. + * Required. Text to be summarized. */ - infeasibleReason?: string | null; + context?: string | null; /** - * Output only. A list of measurements that are strictly lexicographically ordered by their induced tuples (steps, elapsed_duration). These are used for early stopping computations. + * Optional. Summarization prompt for LLM. */ - measurements?: Schema$GoogleCloudAiplatformV1beta1Measurement[]; + instruction?: string | null; /** - * Output only. Resource name of the Trial assigned by the service. + * Required. Output of the evaluated model. */ - name?: string | null; + prediction?: string | null; /** - * Output only. The parameters of the Trial. + * Optional. Ground truth used to compare against the prediction. */ - parameters?: Schema$GoogleCloudAiplatformV1beta1TrialParameter[]; + reference?: string | null; + } + /** + * Spec for summarization verbosity result. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbosityResult { /** - * Output only. Time when the Trial was started. + * Output only. Confidence for summarization verbosity score. */ - startTime?: string | null; + confidence?: number | null; /** - * Output only. The detailed state of the Trial. + * Output only. Explanation for summarization verbosity score. */ - state?: string | null; + explanation?: string | null; /** - * Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). 
Only available if this trial is part of a HyperparameterTuningJob and the job's trial_job_spec.enable_web_access field is `true`. The keys are names of each node used for the trial; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. + * Output only. Summarization Verbosity score. */ - webAccessUris?: {[key: string]: string} | null; + score?: number | null; } /** - * Next ID: 3 + * Spec for summarization verbosity score metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1TrialContext { + export interface Schema$GoogleCloudAiplatformV1beta1SummarizationVerbositySpec { /** - * A human-readable field which can store a description of this context. This will become part of the resulting Trial's description field. + * Optional. Whether to use instance.reference to compute summarization verbosity. */ - description?: string | null; + useReference?: boolean | null; /** - * If/when a Trial is generated or selected from this Context, its Parameters will match any parameters specified here. (I.e. if this context specifies parameter name:'a' int_value:3, then a resulting Trial will have int_value:3 for its parameter named 'a'.) Note that we first attempt to match existing REQUESTED Trials with contexts, and if there are no matches, we generate suggestions in the subspace defined by the parameters specified here. NOTE: a Context without any Parameters matches the entire feasible search space. + * Optional. Which version to use for evaluation. */ - parameters?: Schema$GoogleCloudAiplatformV1beta1TrialParameter[]; + version?: number | null; } /** - * A message representing a parameter to be tuned. + * Hyperparameters for SFT. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1TrialParameter { + export interface Schema$GoogleCloudAiplatformV1beta1SupervisedHyperParameters { /** - * Output only. The ID of the parameter. The parameter should be defined in StudySpec's Parameters. + * Optional. Adapter size for tuning. */ - parameterId?: string | null; + adapterSize?: string | null; /** - * Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'. + * Optional. Number of complete passes the model makes over the entire training dataset during training. */ - value?: any | null; + epochCount?: string | null; + /** + * Optional. Multiplier for adjusting the default learning rate. + */ + learningRateMultiplier?: number | null; } /** - * The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. + * Dataset distribution for Supervised Tuning. */ - export interface Schema$GoogleCloudAiplatformV1beta1TunedModel { + export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution { /** - * Output only. A resource name of an Endpoint. Format: `projects/{project\}/locations/{location\}/endpoints/{endpoint\}`. + * Output only. Defines the histogram bucket. */ - endpoint?: string | null; + buckets?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistributionDatasetBucket[]; /** - * Output only. The resource name of the TunedModel. Format: `projects/{project\}/locations/{location\}/models/{model\}`. + * Output only. The maximum of the population values. */ - model?: string | null; - } - /** - * The tuning data statistic values for TuningJob. - */ - export interface Schema$GoogleCloudAiplatformV1beta1TuningDataStats { + max?: number | null; /** - * The SFT Tuning data stats. + * Output only. The arithmetic mean of the values in the population. 
*/ - supervisedTuningDataStats?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDataStats; - } - /** - * Represents a TuningJob that runs with Google owned models. - */ - export interface Schema$GoogleCloudAiplatformV1beta1TuningJob { + mean?: number | null; /** - * The base model that is being tuned, e.g., "gemini-1.0-pro-002". + * Output only. The median of the values in the population. */ - baseModel?: string | null; + median?: number | null; /** - * Output only. Time when the TuningJob was created. + * Output only. The minimum of the population values. */ - createTime?: string | null; + min?: number | null; /** - * Optional. The description of the TuningJob. + * Output only. The 5th percentile of the values in the population. */ - description?: string | null; + p5?: number | null; /** - * Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key. + * Output only. The 95th percentile of the values in the population. */ - encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; + p95?: number | null; /** - * Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. + * Output only. Sum of a given population of values. */ - endTime?: string | null; + sum?: string | null; + } + /** + * Dataset bucket used to create a histogram for the distribution given a population of values. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistributionDatasetBucket { /** - * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + * Output only. Number of values in the bucket. */ - error?: Schema$GoogleRpcStatus; + count?: number | null; /** - * Output only. The Experiment associated with this TuningJob. + * Output only. Left bound of the bucket. 
*/ - experiment?: string | null; + left?: number | null; /** - * Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + * Output only. Right bound of the bucket. */ - labels?: {[key: string]: string} | null; + right?: number | null; + } + /** + * Tuning data statistics for Supervised Tuning. + */ + export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDataStats { /** - * Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project\}/locations/{location\}/tuningJobs/{tuning_job\}` + * Output only. Number of billable characters in the tuning dataset. */ - name?: string | null; + totalBillableCharacterCount?: string | null; /** - * Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state. + * Output only. Number of tuning characters in the tuning dataset. */ - startTime?: string | null; + totalTuningCharacterCount?: string | null; /** - * Output only. The detailed state of the job. + * Output only. Number of examples in the tuning dataset. */ - state?: string | null; + tuningDatasetExampleCount?: string | null; /** - * Tuning Spec for Supervised Fine Tuning. + * Output only. Number of tuning steps for this Tuning Job. */ - supervisedTuningSpec?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningSpec; + tuningStepCount?: string | null; /** - * Output only. The tuned model resources assiociated with this TuningJob. + * Output only. Sample user messages in the training dataset uri. */ - tunedModel?: Schema$GoogleCloudAiplatformV1beta1TunedModel; + userDatasetExamples?: Schema$GoogleCloudAiplatformV1beta1Content[]; /** - * Optional. The display name of the TunedModel. 
The name can be up to 128 characters long and can consist of any UTF-8 characters. + * Output only. Dataset distributions for the user input tokens. */ - tunedModelDisplayName?: string | null; + userInputTokenDistribution?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution; /** - * Output only. The tuning data statistics associated with this TuningJob. + * Output only. Dataset distributions for the messages per example. */ - tuningDataStats?: Schema$GoogleCloudAiplatformV1beta1TuningDataStats; + userMessagePerExampleDistribution?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution; /** - * Output only. Time when the TuningJob was most recently updated. + * Output only. Dataset distributions for the user output tokens. */ - updateTime?: string | null; + userOutputTokenDistribution?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDatasetDistribution; } /** - * Runtime operation information for IndexEndpointService.UndeployIndex. + * Tuning Spec for Supervised Tuning. */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeployIndexOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SupervisedTuningSpec { /** - * The operation generic information. + * Optional. Hyperparameters for SFT. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for IndexEndpointService.UndeployIndex. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeployIndexRequest { + hyperParameters?: Schema$GoogleCloudAiplatformV1beta1SupervisedHyperParameters; /** - * Required. The ID of the DeployedIndex to be undeployed from the IndexEndpoint. + * Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. */ - deployedIndexId?: string | null; + trainingDatasetUri?: string | null; + /** + * Optional. Cloud Storage path to file containing validation dataset for tuning. 
The dataset must be formatted as a JSONL file. + */ + validationDatasetUri?: string | null; } /** - * Response message for IndexEndpointService.UndeployIndex. + * Request message for FeatureOnlineStoreAdminService.SyncFeatureView. */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeployIndexResponse {} + export interface Schema$GoogleCloudAiplatformV1beta1SyncFeatureViewRequest {} /** - * Runtime operation information for EndpointService.UndeployModel. + * Respose message for FeatureOnlineStoreAdminService.SyncFeatureView. */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeployModelOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1SyncFeatureViewResponse { /** - * The operation generic information. + * Format: `projects/{project\}/locations/{location\}/featureOnlineStores/{feature_online_store\}/featureViews/{feature_view\}/featureViewSyncs/{feature_view_sync\}` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + featureViewSync?: string | null; } /** - * Request message for EndpointService.UndeployModel. + * A tensor value type. */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeployModelRequest { + export interface Schema$GoogleCloudAiplatformV1beta1Tensor { /** - * Required. The ID of the DeployedModel to be undeployed from the Endpoint. + * Type specific representations that make it easy to create tensor protos in all languages. Only the representation corresponding to "dtype" can be set. The values hold the flattened representation of the tensor in row major order. BOOL */ - deployedModelId?: string | null; + boolVal?: boolean[] | null; /** - * If this field is provided, then the Endpoint's traffic_split will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. 
A DeployedModel will be successfully undeployed only if it doesn't have any traffic assigned to it when this method executes, or if this field unassigns any traffic to it. + * STRING */ - trafficSplit?: {[key: string]: number} | null; - } - /** - * Response message for EndpointService.UndeployModel. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeployModelResponse {} - /** - * Runtime operation information for SolverService.UndeploySolver. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UndeploySolverOperationMetadata { + bytesVal?: string[] | null; + /** + * DOUBLE + */ + doubleVal?: number[] | null; + /** + * The data type of tensor. + */ + dtype?: string | null; + /** + * FLOAT + */ + floatVal?: number[] | null; + /** + * INT64 + */ + int64Val?: string[] | null; + /** + * INT_8 INT_16 INT_32 + */ + intVal?: number[] | null; + /** + * A list of tensor values. + */ + listVal?: Schema$GoogleCloudAiplatformV1beta1Tensor[]; + /** + * Shape of the tensor. + */ + shape?: string[] | null; + /** + * STRING + */ + stringVal?: string[] | null; + /** + * A map of string to tensor. + */ + structVal?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1Tensor; + } | null; /** - * The generic operation information. + * Serialized raw tensor content. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + tensorVal?: string | null; + /** + * UINT64 + */ + uint64Val?: string[] | null; + /** + * UINT8 UINT16 UINT32 + */ + uintVal?: number[] | null; } /** - * Contains model information necessary to perform batch prediction without requiring a full model import. + * Tensorboard is a physical database that stores users' training metrics. A default Tensorboard is provided in each region of a Google Cloud project. If needed users can also create extra Tensorboards in their projects. 
*/ - export interface Schema$GoogleCloudAiplatformV1beta1UnmanagedContainerModel { + export interface Schema$GoogleCloudAiplatformV1beta1Tensorboard { /** - * The path to the directory containing the Model artifact and any of its supporting files. + * Output only. Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. Does not end with a '/'. */ - artifactUri?: string | null; + blobStoragePathPrefix?: string | null; /** - * Input only. The specification of the container that is to be used when deploying this Model. + * Output only. Timestamp when this Tensorboard was created. */ - containerSpec?: Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec; + createTime?: string | null; /** - * Contains the schemata used in Model's predictions and explanations + * Description of this Tensorboard. */ - predictSchemata?: Schema$GoogleCloudAiplatformV1beta1PredictSchemata; - } - /** - * Runtime operation information for UpdateDeploymentResourcePool method. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateDeploymentResourcePoolOperationMetadata { + description?: string | null; /** - * The operation generic information. + * Required. User provided name of this Tensorboard. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Runtime operation information for ModelService.UpdateExplanationDataset. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateExplanationDatasetOperationMetadata { + displayName?: string | null; /** - * The common part of the operation metadata. + * Customer-managed encryption key spec for a Tensorboard. If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for ModelService.UpdateExplanationDataset. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateExplanationDatasetRequest { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * The example config containing the location of the dataset. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - examples?: Schema$GoogleCloudAiplatformV1beta1Examples; - } - /** - * Response message of ModelService.UpdateExplanationDataset operation. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateExplanationDatasetResponse {} - /** - * Details of operations that perform update FeatureGroup. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureGroupOperationMetadata { + etag?: string | null; /** - * Operation metadata for FeatureGroup. + * Used to indicate if the TensorBoard instance is the default one. Each project & region can have at most one default TensorBoard instance. Creation of a default TensorBoard instance and updating an existing TensorBoard instance to be default will mark all other TensorBoard instances (if any) as non default. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform update FeatureOnlineStore. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureOnlineStoreOperationMetadata { + isDefault?: boolean | null; /** - * Operation metadata for FeatureOnlineStore. + * The labels with user-defined metadata to organize your Tensorboards. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Tensorboard (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform update Feature. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureOperationMetadata { + labels?: {[key: string]: string} | null; /** - * Operation metadata for Feature Update. + * Output only. Name of the Tensorboard. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform update Featurestore. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeaturestoreOperationMetadata { + name?: string | null; /** - * Operation metadata for Featurestore. + * Output only. The number of Runs stored in this Tensorboard. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform update FeatureView. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureViewOperationMetadata { + runCount?: number | null; /** - * Operation metadata for FeatureView Update. + * Output only. Timestamp when this Tensorboard was last updated. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + updateTime?: string | null; } /** - * Runtime operation information for IndexService.UpdateIndex. + * One blob (e.g, image, graph) viewable on a blob metric plot. */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateIndexOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardBlob { /** - * The operation generic information. + * Optional. The bytes of the blob is not present unless it's returned by the ReadTensorboardBlobData endpoint. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + data?: string | null; /** - * The operation metadata with regard to Matching Engine Index operation. 
+ * Output only. A URI safe key uniquely identifying a blob. Can be used to locate the blob stored in the Cloud Storage bucket of the consumer project. */ - nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata; + id?: string | null; } /** - * Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob. + * One point viewable on a blob metric plot, but mostly just a wrapper message to work around repeated fields can't be used directly within `oneof` fields. */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateModelDeploymentMonitoringJobOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardBlobSequence { /** - * The operation generic information. + * List of blobs contained within the sequence. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + values?: Schema$GoogleCloudAiplatformV1beta1TensorboardBlob[]; } /** - * Runtime operation information for ModelMonitoringService.UpdateModelMonitor. + * A TensorboardExperiment is a group of TensorboardRuns, that are typically the results of a training job run, in a Tensorboard. */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateModelMonitorOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardExperiment { /** - * The operation generic information. + * Output only. Timestamp when this TensorboardExperiment was created. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Details of operations that perform update PersistentResource. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdatePersistentResourceOperationMetadata { + createTime?: string | null; /** - * Operation metadata for PersistentResource. + * Description of this TensorboardExperiment. 
*/ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + description?: string | null; /** - * Progress Message for Update LRO + * User provided name of this TensorboardExperiment. */ - progressMessage?: string | null; - } - /** - * Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateSpecialistPoolOperationMetadata { + displayName?: string | null; /** - * The operation generic information. + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + etag?: string | null; /** - * Output only. The name of the SpecialistPool to which the specialists are being added. Format: `projects/{project_id\}/locations/{location_id\}/specialistPools/{specialist_pool\}` + * The labels with user-defined metadata to organize your TensorboardExperiment. Label keys and values cannot be longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with `aiplatform.googleapis.com/` and are immutable. The following system labels exist for each Dataset: * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value is the metadata_schema's title. */ - specialistPool?: string | null; - } - /** - * Details of operations that perform update Tensorboard. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpdateTensorboardOperationMetadata { + labels?: {[key: string]: string} | null; /** - * Operation metadata for Tensorboard. + * Output only. Name of the TensorboardExperiment. 
Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}` */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Metadata information for NotebookService.UpgradeNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeOperationMetadata { + name?: string | null; /** - * The operation generic information. + * Immutable. Source of the TensorboardExperiment. Example: a custom training job. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + source?: string | null; /** - * A human-readable message that shows the intermediate progress details of NotebookRuntime. + * Output only. Timestamp when this TensorboardExperiment was last updated. */ - progressMessage?: string | null; + updateTime?: string | null; } /** - * Request message for NotebookService.UpgradeNotebookRuntime. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeRequest {} - /** - * Details of ModelService.UploadModel operation. + * TensorboardRun maps to a specific execution of a training job with a given set of hyperparameter values, model definition, dataset, etc */ - export interface Schema$GoogleCloudAiplatformV1beta1UploadModelOperationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardRun { /** - * The common part of the operation metadata. + * Output only. Timestamp when this TensorboardRun was created. */ - genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; - } - /** - * Request message for ModelService.UploadModel. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UploadModelRequest { + createTime?: string | null; /** - * Required. The Model to create. + * Description of this TensorboardRun. */ - model?: Schema$GoogleCloudAiplatformV1beta1Model; + description?: string | null; /** - * Optional. 
The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + * Required. User provided name of this TensorboardRun. This value must be unique among all TensorboardRuns belonging to the same parent TensorboardExperiment. */ - modelId?: string | null; + displayName?: string | null; /** - * Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - parentModel?: string | null; + etag?: string | null; /** - * Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + * The labels with user-defined metadata to organize your TensorboardRuns. This field will be used to filter and visualize Runs in the Tensorboard UI. For example, a Vertex AI training job can set a label aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created within that job. An end user can set a label experiment_id=xxxxx for all the runs produced in a Jupyter notebook. These runs can be grouped by a label value and visualized together in the Tensorboard UI. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. 
No more than 64 user labels can be associated with one TensorboardRun (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. */ - serviceAccount?: string | null; - } - /** - * Response message of ModelService.UploadModel operation. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UploadModelResponse { + labels?: {[key: string]: string} | null; /** - * The name of the uploaded Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` + * Output only. Name of the TensorboardRun. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` */ - model?: string | null; + name?: string | null; /** - * Output only. The version ID of the model that is uploaded. + * Output only. Timestamp when this TensorboardRun was last updated. */ - modelVersionId?: string | null; + updateTime?: string | null; } /** - * Config for uploading RagFile. + * One point viewable on a tensor metric plot. */ - export interface Schema$GoogleCloudAiplatformV1beta1UploadRagFileConfig { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardTensor { /** - * Specifies the size and overlap of chunks after uploading RagFile. + * Required. Serialized form of https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto */ - ragFileChunkingConfig?: Schema$GoogleCloudAiplatformV1beta1RagFileChunkingConfig; + value?: string | null; + /** + * Optional. Version number of TensorProto used to serialize value. + */ + versionNumber?: number | null; } /** - * Request message for VertexRagDataService.UploadRagFile. 
+ * TensorboardTimeSeries maps to times series produced in training runs */ - export interface Schema$GoogleCloudAiplatformV1beta1UploadRagFileRequest { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeries { /** - * Required. The RagFile to upload. + * Output only. Timestamp when this TensorboardTimeSeries was created. */ - ragFile?: Schema$GoogleCloudAiplatformV1beta1RagFile; + createTime?: string | null; /** - * Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile. - */ - uploadRagFileConfig?: Schema$GoogleCloudAiplatformV1beta1UploadRagFileConfig; - } - /** - * Response message for VertexRagDataService.UploadRagFile. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UploadRagFileResponse { + * Description of this TensorboardTimeSeries. + */ + description?: string | null; /** - * The error that occurred while processing the RagFile. + * Required. User provided name of this TensorboardTimeSeries. This value should be unique among all TensorboardTimeSeries resources belonging to the same TensorboardRun resource (parent resource). */ - error?: Schema$GoogleRpcStatus; + displayName?: string | null; /** - * The RagFile that had been uploaded into the RagCorpus. + * Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" update happens. */ - ragFile?: Schema$GoogleCloudAiplatformV1beta1RagFile; - } - /** - * Request message for IndexService.UpsertDatapoints - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpsertDatapointsRequest { + etag?: string | null; /** - * A list of datapoints to be created/updated. + * Output only. Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries. */ - datapoints?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint[]; + metadata?: Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeriesMetadata; /** - * Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. 
The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts. + * Output only. Name of the TensorboardTimeSeries. */ - updateMask?: string | null; - } - /** - * Response message for IndexService.UpsertDatapoints - */ - export interface Schema$GoogleCloudAiplatformV1beta1UpsertDatapointsResponse {} - /** - * References an API call. It contains more information about long running operation and Jobs that are triggered by the API call. - */ - export interface Schema$GoogleCloudAiplatformV1beta1UserActionReference { + name?: string | null; /** - * For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project\}/locations/{location\}/dataLabelingJobs/{data_labeling_job\}` + * Data of the current plugin, with the size limited to 65KB. */ - dataLabelingJob?: string | null; + pluginData?: string | null; /** - * The method name of the API RPC call. For example, "/google.cloud.aiplatform.{apiVersion\}.DatasetService.CreateDataset" + * Immutable. Name of the plugin this time series pertain to. Such as Scalar, Tensor, Blob */ - method?: string | null; + pluginName?: string | null; /** - * For API calls that return a long running operation. Resource name of the long running operation. Format: `projects/{project\}/locations/{location\}/operations/{operation\}` + * Output only. Timestamp when this TensorboardTimeSeries was last updated. */ - operation?: string | null; + updateTime?: string | null; + /** + * Required. Immutable. Type of TensorboardTimeSeries value. + */ + valueType?: string | null; } /** - * Value is the value of the field. + * Describes metadata for a TensorboardTimeSeries. */ - export interface Schema$GoogleCloudAiplatformV1beta1Value { + export interface Schema$GoogleCloudAiplatformV1beta1TensorboardTimeSeriesMetadata { /** - * A double value. + * Output only. 
The largest blob sequence length (number of blobs) of all data points in this time series, if its ValueType is BLOB_SEQUENCE. */ - doubleValue?: number | null; + maxBlobSequenceLength?: string | null; /** - * An integer value. + * Output only. Max step index of all data points within a TensorboardTimeSeries. */ - intValue?: string | null; + maxStep?: string | null; /** - * A string value. + * Output only. Max wall clock timestamp of all data points within a TensorboardTimeSeries. */ - stringValue?: string | null; + maxWallTime?: string | null; } /** - * Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation + * The storage details for TFRecord output content. */ - export interface Schema$GoogleCloudAiplatformV1beta1VertexAISearch { + export interface Schema$GoogleCloudAiplatformV1beta1TFRecordDestination { /** - * Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project\}/locations/{location\}/collections/{collection\}/dataStores/{dataStore\}` + * Required. Google Cloud Storage location. */ - datastore?: string | null; + gcsDestination?: Schema$GoogleCloudAiplatformV1beta1GcsDestination; } /** - * Retrieve from Vertex RAG Store for grounding. + * The config for feature monitoring threshold. */ - export interface Schema$GoogleCloudAiplatformV1beta1VertexRagStore { + export interface Schema$GoogleCloudAiplatformV1beta1ThresholdConfig { /** - * Optional. Deprecated. Please use rag_resources instead. + * Specify a threshold value that can trigger the alert. If this threshold config is for feature distribution distance: 1. For categorical feature, the distribution distance is calculated by L-inifinity norm. 2. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. 
*/ - ragCorpora?: string[] | null; + value?: number | null; + } + /** + * All the data stored in a TensorboardTimeSeries. + */ + export interface Schema$GoogleCloudAiplatformV1beta1TimeSeriesData { /** - * Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. + * Required. The ID of the TensorboardTimeSeries, which will become the final component of the TensorboardTimeSeries' resource name */ - ragResources?: Schema$GoogleCloudAiplatformV1beta1VertexRagStoreRagResource[]; + tensorboardTimeSeriesId?: string | null; /** - * Optional. Number of top k results to return from the selected corpora. + * Required. Data points in this time series. */ - similarityTopK?: number | null; + values?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesDataPoint[]; /** - * Optional. Only return results with vector distance smaller than the threshold. + * Required. Immutable. The value type of this time series. All the values in this time series data must match this value type. */ - vectorDistanceThreshold?: number | null; + valueType?: string | null; } /** - * The definition of the Rag resource. + * A TensorboardTimeSeries data point. */ - export interface Schema$GoogleCloudAiplatformV1beta1VertexRagStoreRagResource { + export interface Schema$GoogleCloudAiplatformV1beta1TimeSeriesDataPoint { /** - * Optional. RagCorpora resource name. Format: `projects/{project\}/locations/{location\}/ragCorpora/{rag_corpus\}` + * A blob sequence value. */ - ragCorpus?: string | null; + blobs?: Schema$GoogleCloudAiplatformV1beta1TensorboardBlobSequence; /** - * Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + * A scalar value. */ - ragFileIds?: string[] | null; - } - /** - * Metadata describes the input video content. 
- */ - export interface Schema$GoogleCloudAiplatformV1beta1VideoMetadata { + scalar?: Schema$GoogleCloudAiplatformV1beta1Scalar; /** - * Optional. The end offset of the video. + * Step index of this data point within the run. */ - endOffset?: string | null; + step?: string | null; /** - * Optional. The start offset of the video. + * A tensor value. */ - startOffset?: string | null; + tensor?: Schema$GoogleCloudAiplatformV1beta1TensorboardTensor; + /** + * Wall clock timestamp when this data point is generated by the end user. + */ + wallTime?: string | null; } /** - * Represents the spec of a worker pool in a job. + * Assigns input data to training, validation, and test sets based on a provided timestamps. The youngest data pieces are assigned to training set, next to validation set, and the oldest to the test set. Supported only for tabular Datasets. */ - export interface Schema$GoogleCloudAiplatformV1beta1WorkerPoolSpec { - /** - * The custom container task. - */ - containerSpec?: Schema$GoogleCloudAiplatformV1beta1ContainerSpec; - /** - * Disk spec. - */ - diskSpec?: Schema$GoogleCloudAiplatformV1beta1DiskSpec; + export interface Schema$GoogleCloudAiplatformV1beta1TimestampSplit { /** - * Optional. Immutable. The specification of a single machine. + * Required. The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. */ - machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; + key?: string | null; /** - * Optional. List of NFS mount spec. + * The fraction of the input data that is to be used to evaluate the Model. */ - nfsMounts?: Schema$GoogleCloudAiplatformV1beta1NfsMount[]; + testFraction?: number | null; /** - * The Python packaged task. 
+ * The fraction of the input data that is to be used to train the Model. */ - pythonPackageSpec?: Schema$GoogleCloudAiplatformV1beta1PythonPackageSpec; + trainingFraction?: number | null; /** - * Optional. The number of worker replicas to use for this worker pool. + * The fraction of the input data that is to be used to validate the Model. */ - replicaCount?: string | null; + validationFraction?: number | null; } /** - * Contains Feature values to be written for a specific entity. + * Tokens info with a list of tokens and the corresponding list of token ids. */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload { + export interface Schema$GoogleCloudAiplatformV1beta1TokensInfo { /** - * Required. The ID of the entity. + * A list of token ids from the input. */ - entityId?: string | null; + tokenIds?: string[] | null; /** - * Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future. + * A list of tokens from the input. */ - featureValues?: { - [key: string]: Schema$GoogleCloudAiplatformV1beta1FeatureValue; - } | null; + tokens?: string[] | null; } /** - * Request message for FeaturestoreOnlineServingService.WriteFeatureValues. + * Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesRequest { + export interface Schema$GoogleCloudAiplatformV1beta1Tool { /** - * Required. The entities to be written. 
Up to 100,000 feature values can be written across all `payloads`. + * Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. */ - payloads?: Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload[]; - } - /** - * Response message for FeaturestoreOnlineServingService.WriteFeatureValues. - */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesResponse {} - /** - * Request message for TensorboardService.WriteTensorboardExperimentData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataRequest { + functionDeclarations?: Schema$GoogleCloudAiplatformV1beta1FunctionDeclaration[]; /** - * Required. Requests containing per-run TensorboardTimeSeries data to write. + * Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. */ - writeRunDataRequests?: Schema$GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest[]; + retrieval?: Schema$GoogleCloudAiplatformV1beta1Retrieval; } /** - * Response message for TensorboardService.WriteTensorboardExperimentData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataResponse {} - /** - * Request message for TensorboardService.WriteTensorboardRunData. + * Input for tool call valid metric. */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidInput { /** - * Required. 
The resource name of the TensorboardRun to write data to. Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` + * Required. Repeated tool call valid instances. */ - tensorboardRun?: string | null; + instances?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidInstance[]; /** - * Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. The upper limit of data points per write request is 5000. + * Required. Spec for tool call valid metric. */ - timeSeriesData?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesData[]; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidSpec; } /** - * Response message for TensorboardService.WriteTensorboardRunData. - */ - export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardRunDataResponse {} - /** - * An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. + * Spec for tool call valid instance. */ - export interface Schema$GoogleCloudAiplatformV1beta1XraiAttribution { - /** - * Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 - */ - blurBaselineConfig?: Schema$GoogleCloudAiplatformV1beta1BlurBaselineConfig; + export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidInstance { /** - * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. 
Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + * Required. Output of the evaluated model. */ - smoothGradConfig?: Schema$GoogleCloudAiplatformV1beta1SmoothGradConfig; + prediction?: string | null; /** - * Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. + * Required. Ground truth used to compare against the prediction. */ - stepCount?: number | null; + reference?: string | null; } /** - * The response message for Locations.ListLocations. + * Tool call valid metric value for an instance. */ - export interface Schema$GoogleCloudLocationListLocationsResponse { - /** - * A list of locations that matches the specified filter in the request. - */ - locations?: Schema$GoogleCloudLocationLocation[]; + export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidMetricValue { /** - * The standard List next-page token. + * Output only. Tool call valid score. */ - nextPageToken?: string | null; + score?: number | null; } /** - * A resource that represents a Google Cloud location. + * Results for tool call valid metric. */ - export interface Schema$GoogleCloudLocationLocation { - /** - * The friendly name for this location, typically a nearby city name. For example, "Tokyo". - */ - displayName?: string | null; - /** - * Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"\} - */ - labels?: {[key: string]: string} | null; - /** - * The canonical id for this location. For example: `"us-east1"`. - */ - locationId?: string | null; - /** - * Service-specific metadata. For example the available capacity at the given location. 
- */ - metadata?: {[key: string]: any} | null; + export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidResults { /** - * Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"` + * Output only. Tool call valid metric values. */ - name?: string | null; + toolCallValidMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolCallValidMetricValue[]; } /** - * Associates `members`, or principals, with a `role`. + * Spec for tool call valid metric. */ - export interface Schema$GoogleIamV1Binding { + export interface Schema$GoogleCloudAiplatformV1beta1ToolCallValidSpec {} + /** + * Tool config. This config is shared for all tools provided in the request. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolConfig { /** - * The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + * Optional. Function calling config. */ - condition?: Schema$GoogleTypeExpr; + functionCallingConfig?: Schema$GoogleCloudAiplatformV1beta1FunctionCallingConfig; + } + /** + * Input for tool name match metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInput { /** - * Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. 
Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid\}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid\}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid\}.svc.id.goog[{namespace\}/{kubernetes-sa\}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid\}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain\}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/group/{group_id\}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/x`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/group/{group_id\}`: A workload identity pool group. 
* `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/x`: All identities in a workload identity pool. * `deleted:user:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid\}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid\}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid\}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`. + * Required. Repeated tool name match instances. */ - members?: string[] | null; + instances?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInstance[]; /** - * Role that is assigned to the list of `members`, or principals. 
For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles). + * Required. Spec for tool name match metric. */ - role?: string | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchSpec; } /** - * Request message for `GetIamPolicy` method. + * Spec for tool name match instance. */ - export interface Schema$GoogleIamV1GetIamPolicyRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchInstance { /** - * OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`. + * Required. Output of the evaluated model. */ - options?: Schema$GoogleIamV1GetPolicyOptions; + prediction?: string | null; + /** + * Required. Ground truth used to compare against the prediction. + */ + reference?: string | null; } /** - * Encapsulates settings provided to GetIamPolicy. + * Tool name match metric value for an instance. */ - export interface Schema$GoogleIamV1GetPolicyOptions { + export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue { /** - * Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+ * Output only. Tool name match score. */ - requestedPolicyVersion?: number | null; + score?: number | null; } /** - * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] \}, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", \} \} ], "etag": "BwWWja0YfJA=", "version": 3 \} ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < 
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). + * Results for tool name match metric. */ - export interface Schema$GoogleIamV1Policy { + export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchResults { /** - * Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`. + * Output only. Tool name match metric values. */ - bindings?: Schema$GoogleIamV1Binding[]; + toolNameMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue[]; + } + /** + * Spec for tool name match metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolNameMatchSpec {} + /** + * Input for tool parameter key match metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput { /** - * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. 
**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. + * Required. Repeated tool parameter key match instances. */ - etag?: string | null; + instances?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance[]; /** - * Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + * Required. Spec for tool parameter key match metric. */ - version?: number | null; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec; } /** - * Request message for `SetIamPolicy` method. + * Spec for tool parameter key match instance. 
*/ - export interface Schema$GoogleIamV1SetIamPolicyRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance { /** - * REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them. + * Required. Output of the evaluated model. */ - policy?: Schema$GoogleIamV1Policy; + prediction?: string | null; + /** + * Required. Ground truth used to compare against the prediction. + */ + reference?: string | null; } /** - * Request message for `TestIamPermissions` method. + * Tool parameter key match metric value for an instance. */ - export interface Schema$GoogleIamV1TestIamPermissionsRequest { + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue { /** - * The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * Output only. Tool parameter key match score. */ - permissions?: string[] | null; + score?: number | null; } /** - * Response message for `TestIamPermissions` method. + * Results for tool parameter key match metric. */ - export interface Schema$GoogleIamV1TestIamPermissionsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults { /** - * A subset of `TestPermissionsRequest.permissions` that the caller is allowed. + * Output only. Tool parameter key match metric values. */ - permissions?: string[] | null; + toolParameterKeyMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue[]; } /** - * The response message for Operations.ListOperations. + * Spec for tool parameter key match metric. 
*/ - export interface Schema$GoogleLongrunningListOperationsResponse { + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec {} + /** + * Input for tool parameter key value match metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput { /** - * The standard List next-page token. + * Required. Repeated tool parameter key value match instances. */ - nextPageToken?: string | null; + instances?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance[]; /** - * A list of operations that matches the specified filter in the request. + * Required. Spec for tool parameter key value match metric. */ - operations?: Schema$GoogleLongrunningOperation[]; + metricSpec?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec; } /** - * This resource represents a long-running operation that is the result of a network API call. + * Spec for tool parameter key value match instance. */ - export interface Schema$GoogleLongrunningOperation { - /** - * If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. - */ - done?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance { /** - * The error result of the operation in case of failure or cancellation. + * Required. Output of the evaluated model. */ - error?: Schema$GoogleRpcStatus; + prediction?: string | null; /** - * Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + * Required. Ground truth used to compare against the prediction. */ - metadata?: {[key: string]: any} | null; + reference?: string | null; + } + /** + * Tool parameter key value match metric value for an instance. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue { /** - * The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id\}`. + * Output only. Tool parameter key value match score. */ - name?: string | null; + score?: number | null; + } + /** + * Results for tool parameter key value match metric. + */ + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults { /** - * The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + * Output only. Tool parameter key value match metric values. */ - response?: {[key: string]: any} | null; + toolParameterKvMatchMetricValues?: Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue[]; } /** - * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \} + * Spec for tool parameter key value match metric. */ - export interface Schema$GoogleProtobufEmpty {} + export interface Schema$GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec { + /** + * Optional. Whether to use STRICT string match on parameter values. 
+ */ + useStrictStringMatch?: boolean | null; + } /** - * The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + * A single example of the tool usage. */ - export interface Schema$GoogleRpcStatus { + export interface Schema$GoogleCloudAiplatformV1beta1ToolUseExample { /** - * The status code, which should be an enum value of google.rpc.Code. + * Required. The display name for example. */ - code?: number | null; + displayName?: string | null; /** - * A list of messages that carry the error details. There is a common set of message types for APIs to use. + * Extension operation to call. */ - details?: Array<{[key: string]: any}> | null; + extensionOperation?: Schema$GoogleCloudAiplatformV1beta1ToolUseExampleExtensionOperation; /** - * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + * Function name to call. */ - message?: string | null; - } - /** - * Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. 
This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); \} public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); \} return resultBuilder.build(); \} // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; \} return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; \} static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; \} Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; \} [result autorelease]; return result; \} // ... 
Example (JavaScript): // ... var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); \} var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); \}; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); \} resultBuilder.push(hexString); return resultBuilder.join(''); \}; // ... - */ - export interface Schema$GoogleTypeColor { + functionName?: string | null; /** - * The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). + * Required. Query that should be routed to this tool. */ - alpha?: number | null; + query?: string | null; /** - * The amount of blue in the color as a value in the interval [0, 1]. + * Request parameters used for executing this tool. */ - blue?: number | null; + requestParams?: {[key: string]: any} | null; /** - * The amount of green in the color as a value in the interval [0, 1]. 
+ * Response parameters generated by this tool. */ - green?: number | null; + responseParams?: {[key: string]: any} | null; /** - * The amount of red in the color as a value in the interval [0, 1]. + * Summary of the tool response to the user query. */ - red?: number | null; + responseSummary?: string | null; } /** - * Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp + * Identifies one operation of the extension. */ - export interface Schema$GoogleTypeDate { - /** - * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. - */ - day?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1ToolUseExampleExtensionOperation { /** - * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. + * Resource name of the extension. */ - month?: number | null; + extension?: string | null; /** - * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. + * Required. Operation ID of the extension. */ - year?: number | null; + operationId?: string | null; } /** - * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + * CMLE training config. For every active learning labeling iteration, system will train a machine learning model on CMLE. The trained model will be used by data sampling algorithm to select DataItems. */ - export interface Schema$GoogleTypeExpr { + export interface Schema$GoogleCloudAiplatformV1beta1TrainingConfig { /** - * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + * The timeout hours for the CMLE training job, expressed in milli hours i.e. 1,000 value in this field means 1 hour. */ - description?: string | null; + timeoutTrainingMilliHours?: string | null; + } + /** + * The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, upload the Model to Vertex AI, and evaluate the Model. + */ + export interface Schema$GoogleCloudAiplatformV1beta1TrainingPipeline { /** - * Textual representation of an expression in Common Expression Language syntax. 
+ * Output only. Time when the TrainingPipeline was created. */ - expression?: string | null; + createTime?: string | null; /** - * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + * Required. The user-defined name of this TrainingPipeline. */ - location?: string | null; + displayName?: string | null; /** - * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + * Customer-managed encryption key spec for a TrainingPipeline. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if model_to_upload is not set separately. */ - title?: string | null; - } - /** - * Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time. - */ - export interface Schema$GoogleTypeInterval { + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end. + * Output only. Time when the TrainingPipeline entered any of the following states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, `PIPELINE_STATE_CANCELLED`. */ endTime?: string | null; /** - * Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start. + * Output only. Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or `PIPELINE_STATE_CANCELLED`. */ - startTime?: string | null; - } - /** - * Represents an amount of money with its currency type. 
- */ - export interface Schema$GoogleTypeMoney { + error?: Schema$GoogleRpcStatus; /** - * The three-letter currency code defined in ISO 4217. + * Specifies Vertex AI owned input data that may be used for training the Model. The TrainingPipeline's training_task_definition should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the training_task_definition, then it should be assumed that the TrainingPipeline does not depend on this configuration. */ - currencyCode?: string | null; + inputDataConfig?: Schema$GoogleCloudAiplatformV1beta1InputDataConfig; /** - * Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. + * The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. */ - nanos?: number | null; + labels?: {[key: string]: string} | null; /** - * The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. + * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. 
*/ - units?: string | null; - } - export interface Schema$IntelligenceCloudAutomlXpsMetricEntry { + modelId?: string | null; /** - * For billing metrics that are using legacy sku's, set the legacy billing metric id here. This will be sent to Chemist as the "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave empty. + * Describes the Model that may be uploaded (via ModelService.UploadModel) by this TrainingPipeline. The TrainingPipeline's training_task_definition should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the training_task_definition, then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task does not support uploading a Model as part of the pipeline. When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and the trained Model had been uploaded into Vertex AI, then the model_to_upload's resource name is populated. The Model is always uploaded into the Project and Location in which this pipeline is. */ - argentumMetricId?: string | null; + modelToUpload?: Schema$GoogleCloudAiplatformV1beta1Model; /** - * A double value. + * Output only. Resource name of the TrainingPipeline. */ - doubleValue?: number | null; + name?: string | null; /** - * A signed 64-bit integer value. + * Optional. When specify this field, the `model_to_upload` will not be uploaded as a new model, instead, it will become a new version of this `parent_model`. */ - int64Value?: string | null; + parentModel?: string | null; /** - * The metric name defined in the service configuration. + * Output only. Time when the TrainingPipeline for the first time entered the `PIPELINE_STATE_RUNNING` state. */ - metricName?: string | null; + startTime?: string | null; /** - * Billing system labels for this (metric, value) pair. + * Output only. 
The detailed state of the pipeline. */ - systemLabels?: Schema$IntelligenceCloudAutomlXpsMetricEntryLabel[]; - } - export interface Schema$IntelligenceCloudAutomlXpsMetricEntryLabel { + state?: string | null; /** - * The name of the label. + * Required. A Google Cloud Storage path to the YAML file that defines the training task which is responsible for producing the model artifact, and may also include additional auxiliary work. The definition files that can be used here are found in gs://google-cloud-aiplatform/schema/trainingjob/definition/. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. */ - labelName?: string | null; + trainingTaskDefinition?: string | null; /** - * The value of the label. + * Required. The training task's parameter(s), as specified in the training_task_definition's `inputs`. */ - labelValue?: string | null; - } - export interface Schema$IntelligenceCloudAutomlXpsReportingMetrics { + trainingTaskInputs?: any | null; /** - * The effective time training used. If set, this is used for quota management and billing. Deprecated. AutoML BE doesn't use this. Don't set. + * Output only. The metadata information as specified in the training_task_definition's `metadata`. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's training_task_definition contains `metadata` object. */ - effectiveTrainingDuration?: string | null; + trainingTaskMetadata?: any | null; /** - * One entry per metric name. The values must be aggregated per metric name. + * Output only. Time when the TrainingPipeline was most recently updated. 
*/ - metricEntries?: Schema$IntelligenceCloudAutomlXpsMetricEntry[]; + updateTime?: string | null; } /** - * The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30 + * A message representing a Trial. A Trial contains a unique set of Parameters that has been or will be evaluated, along with the objective metrics got by running the Trial. */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoDocAttribution { - amarnaId?: string | null; - arxivId?: string | null; - author?: string | null; - bibkey?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1Trial { /** - * ID of the paper in bioarxiv like ddoi.org/{biorxiv_id\} eg: https://doi.org/10.1101/343517 + * Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique client_id. When a client asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client should evaluate the Trial, complete it, and report back to Vertex AI Vizier. If suggestion is asked again by same client_id before the Trial is completed, the same Trial will be returned. Multiple clients with different client_ids can ask for suggestions simultaneously, each of them will get their own Trial. */ - biorxivId?: string | null; - bookTitle?: string | null; + clientId?: string | null; /** - * The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. + * Output only. The CustomJob name linked to the Trial. 
It's set for a HyperparameterTuningJob's Trial. */ - bookVolumeId?: string | null; - category?: string | null; - conversationId?: string | null; + customJob?: string | null; /** - * The dataset this document comes from. + * Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`. */ - dataset?: string | null; - filepath?: string | null; - geminiId?: string | null; - gnewsArticleTitle?: string | null; - goodallExampleId?: string | null; + endTime?: string | null; + /** + * Output only. The final measurement containing the objective value. + */ + finalMeasurement?: Schema$GoogleCloudAiplatformV1beta1Measurement; /** - * Whether the document is opted out. + * Output only. The identifier of the Trial assigned by the service. */ - isOptOut?: boolean | null; - isPrompt?: boolean | null; - lamdaExampleId?: string | null; - license?: string | null; - meenaConversationId?: string | null; + id?: string | null; /** - * Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. + * Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is `INFEASIBLE`. */ - naturalLanguageCode?: string | null; + infeasibleReason?: string | null; /** - * True if this doc has no attribution information available. We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. + * Output only. A list of measurements that are strictly lexicographically ordered by their induced tuples (steps, elapsed_duration). These are used for early stopping computations. 
*/ - noAttribution?: boolean | null; - podcastUtteranceId?: string | null; - publicationDate?: Schema$GoogleTypeDate; + measurements?: Schema$GoogleCloudAiplatformV1beta1Measurement[]; /** - * This field is for opt-out experiment only, MUST never be used during actual production/serving. + * Output only. Resource name of the Trial assigned by the service. */ - qualityScoreExperimentOnly?: number | null; + name?: string | null; /** - * Github repository + * Output only. The parameters of the Trial. */ - repo?: string | null; + parameters?: Schema$GoogleCloudAiplatformV1beta1TrialParameter[]; /** - * URL of a webdoc + * Output only. Time when the Trial was started. */ - url?: string | null; - volumeId?: string | null; + startTime?: string | null; /** - * Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. + * Output only. The detailed state of the Trial. */ - wikipediaArticleTitle?: string | null; + state?: string | null; /** - * The unique video id from Youtube. Example: AkoGsW52Ir0 + * Output only. URIs for accessing [interactive shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) (one URI for each training node). Only available if this trial is part of a HyperparameterTuningJob and the job's trial_job_spec.enable_web_access field is `true`. The keys are names of each node used for the trial; for example, `workerpool0-0` for the primary node, `workerpool1-0` for the first node in the second worker pool, and `workerpool1-1` for the second node in the second worker pool. The values are the URIs for each node's interactive shell. 
*/ - youtubeVideoId?: string | null; + webAccessUris?: {[key: string]: string} | null; } /** - * The recitation result for one input + * Next ID: 3 */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoRecitationResult { - dynamicSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + export interface Schema$GoogleCloudAiplatformV1beta1TrialContext { + /** + * A human-readable field which can store a description of this context. This will become part of the resulting Trial's description field. + */ + description?: string | null; /** - * The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK \> CITE \> NO_ACTION. When the given input is not found in any source, the recitation action will not be specified. + * If/when a Trial is generated or selected from this Context, its Parameters will match any parameters specified here. (I.e. if this context specifies parameter name:'a' int_value:3, then a resulting Trial will have int_value:3 for its parameter named 'a'.) Note that we first attempt to match existing REQUESTED Trials with contexts, and if there are no matches, we generate suggestions in the subspace defined by the parameters specified here. NOTE: a Context without any Parameters matches the entire feasible search space. */ - recitationAction?: string | null; - trainingSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + parameters?: Schema$GoogleCloudAiplatformV1beta1TrialParameter[]; } /** - * The recitation result for each segment in a given input. + * A message representing a parameter to be tuned. */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult { - /** - * The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. 
- */ - attributionDataset?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1TrialParameter { /** - * human-friendly string that contains information from doc_attribution which could be shown by clients + * Output only. The ID of the parameter. The parameter should be defined in StudySpec's Parameters. */ - displayAttributionMessage?: string | null; - docAttribution?: Schema$LanguageLabsAidaTrustRecitationProtoDocAttribution; + parameterId?: string | null; /** - * number of documents that contained this segment + * Output only. The value of the parameter. `number_value` will be set if a parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec is in type 'CATEGORICAL'. */ - docOccurrences?: number | null; - endIndex?: number | null; + value?: any | null; + } + /** + * The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1TunedModel { /** - * The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. + * Output only. A resource name of an Endpoint. Format: `projects/{project\}/locations/{location\}/endpoints/{endpoint\}`. */ - rawText?: string | null; - segmentRecitationAction?: string | null; + endpoint?: string | null; /** - * The category of the source dataset where the segment came from. This is more stable than Dataset. + * Output only. The resource name of the TunedModel. Format: `projects/{project\}/locations/{location\}/models/{model\}`. */ - sourceCategory?: string | null; + model?: string | null; + } + /** + * The tuning data statistic values for TuningJob. + */ + export interface Schema$GoogleCloudAiplatformV1beta1TuningDataStats { /** - * The segment boundary start (inclusive) and end index (exclusive) in the given text. 
In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. + * The SFT Tuning data stats. */ - startIndex?: number | null; + supervisedTuningDataStats?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningDataStats; } /** - * The recitation result for one stream input + * Represents a TuningJob that runs with Google owned models. */ - export interface Schema$LanguageLabsAidaTrustRecitationProtoStreamRecitationResult { + export interface Schema$GoogleCloudAiplatformV1beta1TuningJob { /** - * The recitation result against the given dynamic data source. + * The base model that is being tuned, e.g., "gemini-1.0-pro-002". */ - dynamicSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; + baseModel?: string | null; /** - * Last index of input text fully checked for recitation in the entire streaming context. Would return `-1` if no Input was checked for recitation. + * Output only. Time when the TuningJob was created. */ - fullyCheckedTextIndex?: number | null; + createTime?: string | null; /** - * The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK \> CITE \> NO_ACTION. + * Optional. The description of the TuningJob. */ - recitationAction?: string | null; + description?: string | null; /** - * The recitation result against model training data. + * Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key. */ - trainingSegmentResults?: Schema$LanguageLabsAidaTrustRecitationProtoSegmentResult[]; - } - /** - * Recitation check result for a single content chunk. 
- */ - export interface Schema$LearningGenaiRecitationContentChunkRecitationCheckResult { - imageResult?: Schema$LearningGenaiRecitationImageRecitationCheckResult; - textResult?: Schema$LearningGenaiRecitationRecitationResult; - } - /** - * The proto defines the attribution information for a document using whatever fields are most applicable for that document's datasource. For example, a Wikipedia article's attribution is in the form of its article title, a website is in the form of a URL, and a Github repo is in the form of a repo name. Next id: 30 - */ - export interface Schema$LearningGenaiRecitationDocAttribution { - amarnaId?: string | null; - arxivId?: string | null; - author?: string | null; - bibkey?: string | null; + encryptionSpec?: Schema$GoogleCloudAiplatformV1beta1EncryptionSpec; /** - * ID of the paper in bioarxiv like ddoi.org/{biorxiv_id\} eg: https://doi.org/10.1101/343517 + * Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. */ - biorxivId?: string | null; - bookTitle?: string | null; + endTime?: string | null; /** - * The Oceanographers full-view books dataset uses a 'volume id' as the unique ID of a book. There is a deterministic function from a volume id to a URL under the books.google.com domain. Marked as 'optional' since a volume ID of zero is potentially possible and we want to distinguish that from the volume ID not being set. + * Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */ - bookVolumeId?: string | null; - conversationId?: string | null; + error?: Schema$GoogleRpcStatus; /** - * The dataset this document comes from. + * Output only. The Experiment associated with this TuningJob. 
*/ - dataset?: string | null; - filepath?: string | null; - geminiId?: string | null; - gnewsArticleTitle?: string | null; - goodallExampleId?: string | null; + experiment?: string | null; + /** + * Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + */ + labels?: {[key: string]: string} | null; /** - * Whether the document is opted out. + * Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project\}/locations/{location\}/tuningJobs/{tuning_job\}` */ - isOptOut?: boolean | null; + name?: string | null; /** - * When true, this attribution came from the user's prompt. + * Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state. */ - isPrompt?: boolean | null; - lamdaExampleId?: string | null; - license?: string | null; - meenaConversationId?: string | null; + startTime?: string | null; + /** + * Output only. The detailed state of the job. + */ + state?: string | null; /** - * Natural (not programming) language of the document. Language code as defined by http://www.unicode.org/reports/tr35/#Identifiers and https://tools.ietf.org/html/bcp47. Currently applicable to full-view books. Use docinfo-util.h to set & read language fields. See go/iii. + * Tuning Spec for Supervised Fine Tuning. */ - naturalLanguageCode?: string | null; + supervisedTuningSpec?: Schema$GoogleCloudAiplatformV1beta1SupervisedTuningSpec; /** - * True if this doc has no attribution information available. 
We use an explicit field for this instead of just implicitly leaving all the DocAttribution fields blank to distinguish a case where a bug/oversight has left the attribution information empty vs when we really have no attribution information available. + * Output only. The tuned model resources assiociated with this TuningJob. */ - noAttribution?: boolean | null; - podcastUtteranceId?: string | null; - publicationDate?: Schema$GoogleTypeDate; + tunedModel?: Schema$GoogleCloudAiplatformV1beta1TunedModel; /** - * This field is for opt-out experiment only, MUST never be used during actual production/serving. + * Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. */ - qualityScoreExperimentOnly?: number | null; + tunedModelDisplayName?: string | null; /** - * Github repository + * Output only. The tuning data statistics associated with this TuningJob. */ - repo?: string | null; + tuningDataStats?: Schema$GoogleCloudAiplatformV1beta1TuningDataStats; /** - * URL of a webdoc + * Output only. Time when the TuningJob was most recently updated. */ - url?: string | null; - volumeId?: string | null; + updateTime?: string | null; + } + /** + * Runtime operation information for IndexEndpointService.UndeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UndeployIndexOperationMetadata { /** - * Wikipedia article title. The Wikipedia TFDS dataset includes article titles but not URLs. While a URL is to the best of our knowledge a deterministic function of the title, we store the original title to reflect the information in the original dataset. + * The operation generic information. */ - wikipediaArticleTitle?: string | null; - youtubeVideoId?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Attribution information about the recited image. + * Request message for IndexEndpointService.UndeployIndex. 
*/ - export interface Schema$LearningGenaiRecitationImageDocAttribution { + export interface Schema$GoogleCloudAiplatformV1beta1UndeployIndexRequest { /** - * Unique ID of the image. + * Required. The ID of the DeployedIndex to be undeployed from the IndexEndpoint. */ - datasetName?: string | null; + deployedIndexId?: string | null; + } + /** + * Response message for IndexEndpointService.UndeployIndex. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UndeployIndexResponse {} + /** + * Runtime operation information for EndpointService.UndeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UndeployModelOperationMetadata { /** - * Doc ID to identify the image. These could be urls of images or amarna id. + * The operation generic information. */ - stringDocids?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } - export interface Schema$LearningGenaiRecitationImageRecitationCheckResult { + /** + * Request message for EndpointService.UndeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UndeployModelRequest { /** - * Only has NO_ACTION or BLOCK to start with. + * Required. The ID of the DeployedModel to be undeployed from the Endpoint. */ - recitationAction?: string | null; + deployedModelId?: string | null; /** - * Images that are similar to the requested image. + * If this field is provided, then the Endpoint's traffic_split will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. A DeployedModel will be successfully undeployed only if it doesn't have any traffic assigned to it when this method executes, or if this field unassigns any traffic to it. 
*/ - recitedImages?: Schema$LearningGenaiRecitationImageRecitationCheckResultSimilarImage[]; + trafficSplit?: {[key: string]: number} | null; } - export interface Schema$LearningGenaiRecitationImageRecitationCheckResultSimilarImage { + /** + * Response message for EndpointService.UndeployModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UndeployModelResponse {} + /** + * Runtime operation information for SolverService.UndeploySolver. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UndeploySolverOperationMetadata { /** - * Attribution information about the image + * The generic operation information. */ - docAttribution?: Schema$LearningGenaiRecitationImageDocAttribution; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Contains model information necessary to perform batch prediction without requiring a full model import. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UnmanagedContainerModel { /** - * The memorization embedding model that returned this image + * The path to the directory containing the Model artifact and any of its supporting files. */ - embeddingModel?: string | null; + artifactUri?: string | null; /** - * Image ID corresponding of the image corresponding to the score. `image_id` serves for debugging purposes and can't be used by clients to retrieve an image. + * Input only. The specification of the container that is to be used when deploying this Model. */ - imageId?: string | null; + containerSpec?: Schema$GoogleCloudAiplatformV1beta1ModelContainerSpec; /** - * Similarity score of requested image compared with image in training data. + * Contains the schemata used in Model's predictions and explanations */ - scores?: number | null; + predictSchemata?: Schema$GoogleCloudAiplatformV1beta1PredictSchemata; } /** - * Recitation check result for a stream of content chunks (e.g. a model response). 
+ * Runtime operation information for UpdateDeploymentResourcePool method. */ - export interface Schema$LearningGenaiRecitationMMRecitationCheckResult { - chunkResults?: Schema$LearningGenaiRecitationContentChunkRecitationCheckResult[]; + export interface Schema$GoogleCloudAiplatformV1beta1UpdateDeploymentResourcePoolOperationMetadata { /** - * Overall recommended recitation action for the content. + * The operation generic information. */ - recitationAction?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * The recitation result for one input + * Runtime operation information for ModelService.UpdateExplanationDataset. */ - export interface Schema$LearningGenaiRecitationRecitationResult { - dynamicSegmentResults?: Schema$LearningGenaiRecitationSegmentResult[]; + export interface Schema$GoogleCloudAiplatformV1beta1UpdateExplanationDatasetOperationMetadata { /** - * The recitation action for one given input. When its segments contain different actions, the overall action will be returned in the precedence of BLOCK \> CITE \> NO_ACTION. When the given input is not found in any source, the recitation action will be NO_ACTION. + * The common part of the operation metadata. */ - recitationAction?: string | null; - trainingSegmentResults?: Schema$LearningGenaiRecitationSegmentResult[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * The recitation result for each segment in a given input. + * Request message for ModelService.UpdateExplanationDataset. */ - export interface Schema$LearningGenaiRecitationSegmentResult { - /** - * The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly. 
- */ - attributionDataset?: string | null; - /** - * human-friendly string that contains information from doc_attribution which could be shown by clients - */ - displayAttributionMessage?: string | null; - docAttribution?: Schema$LearningGenaiRecitationDocAttribution; - /** - * number of documents that contained this segment - */ - docOccurrences?: number | null; - endIndex?: number | null; + export interface Schema$GoogleCloudAiplatformV1beta1UpdateExplanationDatasetRequest { /** - * The raw text in the given input that is corresponding to the segment. It will be available only when 'return_segment_raw_text' is enabled in the request options. + * The example config containing the location of the dataset. */ - rawText?: string | null; - segmentRecitationAction?: string | null; + examples?: Schema$GoogleCloudAiplatformV1beta1Examples; + } + /** + * Response message of ModelService.UpdateExplanationDataset operation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpdateExplanationDatasetResponse {} + /** + * Details of operations that perform update FeatureGroup. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureGroupOperationMetadata { /** - * The category of the source dataset where the segment came from. This is more stable than Dataset. + * Operation metadata for FeatureGroup. */ - sourceCategory?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Details of operations that perform update FeatureOnlineStore. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureOnlineStoreOperationMetadata { /** - * The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units. + * Operation metadata for FeatureOnlineStore. 
*/ - startIndex?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * The type used for final weights calculation. + * Details of operations that perform update Feature. */ - export interface Schema$LearningGenaiRootCalculationType { - scoreType?: string | null; - weights?: number | null; - } - export interface Schema$LearningGenaiRootClassifierOutput { + export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureOperationMetadata { /** - * If set, this is the output of the first matching rule. + * Operation metadata for Feature Update. */ - ruleOutput?: Schema$LearningGenaiRootRuleOutput; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Details of operations that perform update Featurestore. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeaturestoreOperationMetadata { /** - * outputs of all matching rule. + * Operation metadata for Featurestore. */ - ruleOutputs?: Schema$LearningGenaiRootRuleOutput[]; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; + } + /** + * Details of operations that perform update FeatureView. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpdateFeatureViewOperationMetadata { /** - * The results of data_providers and metrics. + * Operation metadata for FeatureView Update. */ - state?: Schema$LearningGenaiRootClassifierState; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } - export interface Schema$LearningGenaiRootClassifierOutputSummary { - metrics?: Schema$LearningGenaiRootMetricOutput[]; + /** + * Runtime operation information for IndexService.UpdateIndex. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpdateIndexOperationMetadata { /** - * Output of the first matching rule. + * The operation generic information. 
*/ - ruleOutput?: Schema$LearningGenaiRootRuleOutput; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * outputs of all matching rule. + * The operation metadata with regard to Matching Engine Index operation. */ - ruleOutputs?: Schema$LearningGenaiRootRuleOutput[]; + nearestNeighborSearchOperationMetadata?: Schema$GoogleCloudAiplatformV1beta1NearestNeighborSearchOperationMetadata; } /** - * DataProviderOutput and MetricOutput can be saved between calls to the Classifier framework. For instance, you can run the query classifier, get outputs from those metrics, then use them in a result classifier as well. Example rule based on this idea: and_rules { rule { metric_name: 'query_safesearch_v2' ... \} rule { metric_name: 'response_safesearch_v2' ... \} \} + * Runtime operation information for JobService.UpdateModelDeploymentMonitoringJob. */ - export interface Schema$LearningGenaiRootClassifierState { - dataProviderOutput?: Schema$LearningGenaiRootDataProviderOutput[]; - metricOutput?: Schema$LearningGenaiRootMetricOutput[]; + export interface Schema$GoogleCloudAiplatformV1beta1UpdateModelDeploymentMonitoringJobOperationMetadata { + /** + * The operation generic information. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Stores all metadata relating to AIDA DoConversation. + * Runtime operation information for ModelMonitoringService.UpdateModelMonitor. */ - export interface Schema$LearningGenaiRootCodeyChatMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1UpdateModelMonitorOperationMetadata { /** - * Indicates the programming language of the code if the message is a code chunk. + * The operation generic information. */ - codeLanguage?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Describes a sample at a checkpoint for post-processing. + * Details of operations that perform update PersistentResource. 
*/ - export interface Schema$LearningGenaiRootCodeyCheckpoint { - /** - * Metadata that describes what was truncated at this checkpoint. - */ - codeyTruncatorMetadata?: Schema$LearningGenaiRootCodeyTruncatorMetadata; + export interface Schema$GoogleCloudAiplatformV1beta1UpdatePersistentResourceOperationMetadata { /** - * Current state of the sample after truncator. + * Operation metadata for PersistentResource. */ - currentSample?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Postprocessor run that yielded this checkpoint. + * Progress Message for Update LRO */ - postInferenceStep?: string | null; - } - /** - * Stores all metadata relating to Completion. - */ - export interface Schema$LearningGenaiRootCodeyCompletionMetadata { - checkpoints?: Schema$LearningGenaiRootCodeyCheckpoint[]; + progressMessage?: string | null; } /** - * Stores all metadata relating to GenerateCode. + * Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool. */ - export interface Schema$LearningGenaiRootCodeyGenerationMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1UpdateSpecialistPoolOperationMetadata { /** - * Last state of the sample before getting dropped/returned. + * The operation generic information. */ - output?: string | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Last Codey postprocessing step for this sample before getting dropped/returned. + * Output only. The name of the SpecialistPool to which the specialists are being added. Format: `projects/{project_id\}/locations/{location_id\}/specialistPools/{specialist_pool\}` */ - postInferenceStep?: string | null; + specialistPool?: string | null; } /** - * Top-level wrapper used to store all things codey-related. + * Details of operations that perform update Tensorboard. 
*/ - export interface Schema$LearningGenaiRootCodeyOutput { - codeyChatMetadata?: Schema$LearningGenaiRootCodeyChatMetadata; - codeyCompletionMetadata?: Schema$LearningGenaiRootCodeyCompletionMetadata; - codeyGenerationMetadata?: Schema$LearningGenaiRootCodeyGenerationMetadata; + export interface Schema$GoogleCloudAiplatformV1beta1UpdateTensorboardOperationMetadata { + /** + * Operation metadata for Tensorboard. + */ + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } /** - * Metadata describing what was truncated at each checkpoint. + * Metadata information for NotebookService.UpgradeNotebookRuntime. */ - export interface Schema$LearningGenaiRootCodeyTruncatorMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeOperationMetadata { /** - * Index of the current sample that trims off truncated text. + * The operation generic information. */ - cutoffIndex?: number | null; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; /** - * Text that was truncated at a specific checkpoint. + * A human-readable message that shows the intermediate progress details of NotebookRuntime. */ - truncatedText?: string | null; + progressMessage?: string | null; } /** - * Score threshold for a category. + * Request message for NotebookService.UpgradeNotebookRuntime. */ - export interface Schema$LearningGenaiRootControlDecodingConfigThreshold { - policy?: string | null; - scoreMax?: number | null; - } + export interface Schema$GoogleCloudAiplatformV1beta1UpgradeNotebookRuntimeRequest {} /** - * Holds one control decoding record. + * Details of ModelService.UploadModel operation. */ - export interface Schema$LearningGenaiRootControlDecodingRecord { - /** - * Prefixes feeded into scorer. - */ - prefixes?: string | null; - /** - * Per policy scores returned from Scorer. Expect to have the same number of scores as in `thresholds`. 
- */ - scores?: Schema$LearningGenaiRootControlDecodingRecordPolicyScore[]; - /** - * Suffixes feeded into scorer. - */ - suffiexes?: string | null; - /** - * Per policy thresholds from user config. - */ - thresholds?: Schema$LearningGenaiRootControlDecodingConfigThreshold[]; - } - export interface Schema$LearningGenaiRootControlDecodingRecordPolicyScore { - policy?: string | null; - score?: number | null; - } - export interface Schema$LearningGenaiRootControlDecodingRecords { - /** - * One ControlDecodingRecord record maps to one rewind. - */ - records?: Schema$LearningGenaiRootControlDecodingRecord[]; - } - export interface Schema$LearningGenaiRootDataProviderOutput { - name?: string | null; + export interface Schema$GoogleCloudAiplatformV1beta1UploadModelOperationMetadata { /** - * If set, this DataProvider failed and this is the error message. + * The common part of the operation metadata. */ - status?: Schema$UtilStatusProto; + genericMetadata?: Schema$GoogleCloudAiplatformV1beta1GenericOperationMetadata; } - export interface Schema$LearningGenaiRootFilterMetadata { - /** - * Filter confidence. - */ - confidence?: string | null; - /** - * Debug info for the message. - */ - debugInfo?: Schema$LearningGenaiRootFilterMetadataFilterDebugInfo; - /** - * A fallback message chosen by the applied filter. - */ - fallback?: string | null; + /** + * Request message for ModelService.UploadModel. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UploadModelRequest { /** - * Additional info for the filter. + * Required. The Model to create. */ - info?: string | null; + model?: Schema$GoogleCloudAiplatformV1beta1Model; /** - * Name of the filter that triggered. + * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. 
*/ - name?: string | null; + modelId?: string | null; /** - * Filter reason. + * Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. */ - reason?: string | null; + parentModel?: string | null; /** - * The input query or generated response that is getting filtered. + * Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). */ - text?: string | null; + serviceAccount?: string | null; } - export interface Schema$LearningGenaiRootFilterMetadataFilterDebugInfo { - classifierOutput?: Schema$LearningGenaiRootClassifierOutput; - defaultMetadata?: string | null; - languageFilterResult?: Schema$LearningGenaiRootLanguageFilterResult; + /** + * Response message of ModelService.UploadModel operation. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UploadModelResponse { /** - * Safety filter output information for LLM Root RAI harm check. + * The name of the uploaded Model resource. Format: `projects/{project\}/locations/{location\}/models/{model\}` */ - raiOutput?: Schema$LearningGenaiRootRAIOutput; - raiResult?: Schema$CloudAiNlLlmProtoServiceRaiResult; - raiSignal?: Schema$CloudAiNlLlmProtoServiceRaiSignal; + model?: string | null; /** - * Number of rewinds by controlled decoding. + * Output only. The version ID of the model that is uploaded. 
*/ - records?: Schema$LearningGenaiRootControlDecodingRecords; - streamRecitationResult?: Schema$LanguageLabsAidaTrustRecitationProtoStreamRecitationResult; - takedownResult?: Schema$LearningGenaiRootTakedownResult; - toxicityResult?: Schema$LearningGenaiRootToxicityResult; + modelVersionId?: string | null; } - export interface Schema$LearningGenaiRootGroundingMetadata { - citations?: Schema$LearningGenaiRootGroundingMetadataCitation[]; + /** + * Config for uploading RagFile. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UploadRagFileConfig { /** - * True if grounding is cancelled, for example, no facts being retrieved. + * Specifies the size and overlap of chunks after uploading RagFile. */ - groundingCancelled?: boolean | null; - searchQueries?: string[] | null; + ragFileChunkingConfig?: Schema$GoogleCloudAiplatformV1beta1RagFileChunkingConfig; } - export interface Schema$LearningGenaiRootGroundingMetadataCitation { - /** - * Index in the prediction output where the citation ends (exclusive). Must be \> start_index and <= len(output). - */ - endIndex?: number | null; - /** - * Index of the fact supporting this claim. Should be within the range of the `world_facts` in the GenerateResponse. - */ - factIndex?: number | null; + /** + * Request message for VertexRagDataService.UploadRagFile. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UploadRagFileRequest { /** - * Confidence score of this entailment. Value is [0,1] with 1 is the most confidence. + * Required. The RagFile to upload. */ - score?: number | null; + ragFile?: Schema$GoogleCloudAiplatformV1beta1RagFile; /** - * Index in the prediction output where the citation starts (inclusive). Must be \>= 0 and < end_index. + * Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile. 
*/ - startIndex?: number | null; + uploadRagFileConfig?: Schema$GoogleCloudAiplatformV1beta1UploadRagFileConfig; } - export interface Schema$LearningGenaiRootHarm { + /** + * Response message for VertexRagDataService.UploadRagFile. + */ + export interface Schema$GoogleCloudAiplatformV1beta1UploadRagFileResponse { /** - * Please do not use, this is still under development. + * The error that occurred while processing the RagFile. */ - contextualDangerous?: boolean | null; - csam?: boolean | null; - fringe?: boolean | null; - grailImageHarmType?: Schema$LearningGenaiRootHarmGrailImageHarmType; - grailTextHarmType?: Schema$LearningGenaiRootHarmGrailTextHarmType; - imageChild?: boolean | null; - imageCsam?: boolean | null; - imagePedo?: boolean | null; + error?: Schema$GoogleRpcStatus; /** - * Image signals + * The RagFile that had been uploaded into the RagCorpus. */ - imagePorn?: boolean | null; - imageViolence?: boolean | null; - pqc?: boolean | null; - safetycat?: Schema$LearningGenaiRootHarmSafetyCatCategories; + ragFile?: Schema$GoogleCloudAiplatformV1beta1RagFile; + } + /** + * Request message for IndexService.UpsertDatapoints + */ + export interface Schema$GoogleCloudAiplatformV1beta1UpsertDatapointsRequest { /** - * Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into double score. For example the score for "POSSIBLE" is 3 / 5 = 0.6 . + * A list of datapoints to be created/updated. */ - spii?: Schema$LearningGenaiRootHarmSpiiFilter; - threshold?: number | null; - videoFrameChild?: boolean | null; - videoFrameCsam?: boolean | null; - videoFramePedo?: boolean | null; + datapoints?: Schema$GoogleCloudAiplatformV1beta1IndexDatapoint[]; /** - * Video frame signals + * Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. 
The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts. */ - videoFramePorn?: boolean | null; - videoFrameViolence?: boolean | null; - } - /** - * Harm type for images - */ - export interface Schema$LearningGenaiRootHarmGrailImageHarmType { - imageHarmType?: string[] | null; - } - /** - * Harm type for text - */ - export interface Schema$LearningGenaiRootHarmGrailTextHarmType { - harmType?: string[] | null; + updateMask?: string | null; } /** - * LINT.ThenChange(//depot/google3/learning/genai/root/util/classifier/backends/grail/grail.cc) + * Response message for IndexService.UpsertDatapoints */ - export interface Schema$LearningGenaiRootHarmSafetyCatCategories { - categories?: string[] | null; - } + export interface Schema$GoogleCloudAiplatformV1beta1UpsertDatapointsResponse {} /** - * LINT.IfChange + * References an API call. It contains more information about long running operation and Jobs that are triggered by the API call. */ - export interface Schema$LearningGenaiRootHarmSpiiFilter { - usBankRoutingMicr?: boolean | null; - usEmployerIdentificationNumber?: boolean | null; - usSocialSecurityNumber?: boolean | null; - } - export interface Schema$LearningGenaiRootInternalMetadata { - scoredTokens?: Schema$LearningGenaiRootScoredToken[]; - } - export interface Schema$LearningGenaiRootLanguageFilterResult { - /** - * False when query or response should be filtered out due to unsupported language. - */ - allowed?: boolean | null; - /** - * Language of the query or response. - */ - detectedLanguage?: string | null; - /** - * Probability of the language predicted as returned by LangID. - */ - detectedLanguageProbability?: number | null; - } - export interface Schema$LearningGenaiRootMetricOutput { - debug?: string | null; - /** - * Name of the metric. 
- */ - name?: string | null; - numericValue?: number | null; - status?: Schema$UtilStatusProto; - stringValue?: string | null; - } - export interface Schema$LearningGenaiRootPerRequestProcessorDebugMetadataFactualityDebugMetadata { + export interface Schema$GoogleCloudAiplatformV1beta1UserActionReference { /** - * Latency spent on fact retrievals. There might be multiple retrievals from different fact providers. + * For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: `projects/{project\}/locations/{location\}/dataLabelingJobs/{data_labeling_job\}` */ - factRetrievalMillisecondsByProvider?: {[key: string]: string} | null; + dataLabelingJob?: string | null; /** - * Latency spent on prompt2query. The procedure generates a search-friendly query given the original prompt. + * The method name of the API RPC call. For example, "/google.cloud.aiplatform.{apiVersion\}.DatasetService.CreateDataset" */ - prompt2queryMilliseconds?: string | null; + method?: string | null; /** - * Latency if use GroundedGeneration service for the whole retrieval & augmentation. + * For API calls that return a long running operation. Resource name of the long running operation. Format: `projects/{project\}/locations/{location\}/operations/{operation\}` */ - retrievalAugmentMilliseconds?: string | null; + operation?: string | null; } /** - * This is per harm. + * Value is the value of the field. */ - export interface Schema$LearningGenaiRootRAIOutput { - allowed?: boolean | null; - harm?: Schema$LearningGenaiRootHarm; - name?: string | null; - score?: number | null; - } - export interface Schema$LearningGenaiRootRegexTakedownResult { - /** - * False when query or response should be taken down due to match with a blocked regex, true otherwise. - */ - allowed?: boolean | null; - /** - * Regex used to decide that query or response should be taken down. Empty when query or response is kept. 
- */ - takedownRegex?: string | null; - } - export interface Schema$LearningGenaiRootRequestMetrics { - /** - * Metrics for audio samples in the request. - */ - audioMetrics?: Schema$LearningGenaiRootRequestMetricsAudioMetrics; - /** - * Metrics for image samples in the request. - */ - imageMetrics?: Schema$LearningGenaiRootRequestMetricsImageMetrics; - /** - * Number of text tokens extracted from the request. - */ - textTokenCount?: number | null; - /** - * Total number of tokens in the request. - */ - totalTokenCount?: number | null; - /** - * Metrics for video samples in the request. - */ - videoMetrics?: Schema$LearningGenaiRootRequestMetricsVideoMetrics; - } - export interface Schema$LearningGenaiRootRequestMetricsAudioMetrics { + export interface Schema$GoogleCloudAiplatformV1beta1Value { /** - * Duration of the audio sample in seconds. + * A double value. */ - audioDuration?: string | null; + doubleValue?: number | null; /** - * Number of tokens derived directly from audio data. + * An integer value. */ - audioTokenCount?: number | null; + intValue?: string | null; /** - * Number of audio frames in the audio. + * A string value. */ - numAudioFrames?: number | null; + stringValue?: string | null; } - export interface Schema$LearningGenaiRootRequestMetricsImageMetrics { - /** - * Number of tokens extracted from image bytes. - */ - imageTokenCount?: number | null; + /** + * Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation + */ + export interface Schema$GoogleCloudAiplatformV1beta1VertexAISearch { /** - * Number of images in the request. + * Required. Fully-qualified Vertex AI Search's datastore resource ID. 
Format: `projects/{project\}/locations/{location\}/collections/{collection\}/dataStores/{dataStore\}` */ - numImages?: number | null; + datastore?: string | null; } - export interface Schema$LearningGenaiRootRequestMetricsVideoMetrics { - /** - * Metrics associated with audio sample in the video. - */ - audioSample?: Schema$LearningGenaiRootRequestMetricsAudioMetrics; - /** - * Number of video frames in the video. - */ - numVideoFrames?: number | null; - /** - * Duration of the video sample in seconds. - */ - videoDuration?: string | null; + /** + * Retrieve from Vertex RAG Store for grounding. + */ + export interface Schema$GoogleCloudAiplatformV1beta1VertexRagStore { /** - * Number of tokens extracted from video frames. + * Optional. Deprecated. Please use rag_resources instead. */ - videoFramesTokenCount?: number | null; - } - export interface Schema$LearningGenaiRootRequestResponseTakedownResult { + ragCorpora?: string[] | null; /** - * False when response has to be taken down per above config. + * Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. */ - allowed?: boolean | null; + ragResources?: Schema$GoogleCloudAiplatformV1beta1VertexRagStoreRagResource[]; /** - * Regex used to match the request. + * Optional. Number of top k results to return from the selected corpora. */ - requestTakedownRegex?: string | null; + similarityTopK?: number | null; /** - * Regex used to decide that response should be taken down. Empty when response is kept. + * Optional. Only return results with vector distance smaller than the threshold. */ - responseTakedownRegex?: string | null; + vectorDistanceThreshold?: number | null; } /** - * Holds the final routing decision, by storing the model_config_id. And individual scores each model got. + * The definition of the Rag resource. 
*/ - export interface Schema$LearningGenaiRootRoutingDecision { - metadata?: Schema$LearningGenaiRootRoutingDecisionMetadata; + export interface Schema$GoogleCloudAiplatformV1beta1VertexRagStoreRagResource { /** - * The selected model to route traffic to. + * Optional. RagCorpora resource name. Format: `projects/{project\}/locations/{location\}/ragCorpora/{rag_corpus\}` */ - modelConfigId?: string | null; - } - /** - * Debug metadata about the routing decision. - */ - export interface Schema$LearningGenaiRootRoutingDecisionMetadata { - scoreBasedRoutingMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataScoreBased; - tokenLengthBasedRoutingMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBased; + ragCorpus?: string | null; + /** + * Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + */ + ragFileIds?: string[] | null; } /** - * If we are routing using scored based configuration, then the metadata about that is available in this proto. + * Metadata describes the input video content. */ - export interface Schema$LearningGenaiRootRoutingDecisionMetadataScoreBased { - /** - * The rule that was matched. - */ - matchedRule?: Schema$LearningGenaiRootScoreBasedRoutingConfigRule; + export interface Schema$GoogleCloudAiplatformV1beta1VideoMetadata { /** - * The score that was generated by the router i.e. the model. + * Optional. The end offset of the video. */ - score?: Schema$LearningGenaiRootScore; + endOffset?: string | null; /** - * No rules were matched & therefore used the default fallback. + * Optional. The start offset of the video. 
*/ - usedDefaultFallback?: boolean | null; - } - export interface Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBased { - modelInputTokenMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata[]; - modelMaxTokenMetadata?: Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata[]; + startOffset?: string | null; } - export interface Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelInputTokenMetadata { - /** - * The length computed by backends using the formatter & tokenizer specific to the model - */ - computedInputTokenLength?: number | null; - modelId?: string | null; + /** + * Represents the spec of a worker pool in a job. + */ + export interface Schema$GoogleCloudAiplatformV1beta1WorkerPoolSpec { /** - * If true, the model was selected as a fallback, since no model met requirements. + * The custom container task. */ - pickedAsFallback?: boolean | null; + containerSpec?: Schema$GoogleCloudAiplatformV1beta1ContainerSpec; /** - * If true, the model was selected since it met the requriements. + * Disk spec. */ - selected?: boolean | null; - } - export interface Schema$LearningGenaiRootRoutingDecisionMetadataTokenLengthBasedModelMaxTokenMetadata { - maxNumInputTokens?: number | null; - maxNumOutputTokens?: number | null; - modelId?: string | null; - } - export interface Schema$LearningGenaiRootRuleOutput { - decision?: string | null; - name?: string | null; - } - export interface Schema$LearningGenaiRootScore { - calculationType?: Schema$LearningGenaiRootCalculationType; + diskSpec?: Schema$GoogleCloudAiplatformV1beta1DiskSpec; /** - * The internal_metadata is intended to be used by internal processors and will be cleared before returns. + * Optional. Immutable. The specification of a single machine. 
*/ - internalMetadata?: Schema$LearningGenaiRootInternalMetadata; - thresholdType?: Schema$LearningGenaiRootThresholdType; + machineSpec?: Schema$GoogleCloudAiplatformV1beta1MachineSpec; /** - * Top candidate tokens and log probabilities at each decoding step. + * Optional. List of NFS mount spec. */ - tokensAndLogprobPerDecodingStep?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStep; - value?: number | null; - } - export interface Schema$LearningGenaiRootScoreBasedRoutingConfigRule { + nfsMounts?: Schema$GoogleCloudAiplatformV1beta1NfsMount[]; /** - * NOTE: Hardest examples have smaller values in their routing scores. + * The Python packaged task. */ - equalOrGreaterThan?: Schema$LearningGenaiRootScore; - lessThan?: Schema$LearningGenaiRootScore; + pythonPackageSpec?: Schema$GoogleCloudAiplatformV1beta1PythonPackageSpec; /** - * This model_config_id points to ModelConfig::id which allows us to find the ModelConfig to route to. This is part of the banks specified in the ModelBankConfig. + * Optional. The number of worker replicas to use for this worker pool. */ - modelConfigId?: string | null; - } - /** - * Proto containing the results from the Universal Sentence Encoder / Other models - */ - export interface Schema$LearningGenaiRootScoredSimilarityTakedownPhrase { - phrase?: Schema$LearningGenaiRootSimilarityTakedownPhrase; - similarityScore?: number | null; + replicaCount?: string | null; } /** - * A token with its own score. + * Contains Feature values to be written for a specific entity. */ - export interface Schema$LearningGenaiRootScoredToken { + export interface Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload { /** - * Each end_token_score is a logprob for how well the completion would end at a particular token. See http://google3/labs/language/aida/config/proto/model_config.proto;l=376;rcl=573039459 + * Required. The ID of the entity. 
*/ - endTokenScore?: number | null; + entityId?: string | null; /** - * Each score is the logprob for the token in model response. + * Required. Feature values to be written, mapping from Feature ID to value. Up to 100,000 `feature_values` entries may be written across all payloads. The feature generation time, aligned by days, must be no older than five years (1825 days) and no later than one year (366 days) in the future. */ - score?: number | null; - token?: string | null; + featureValues?: { + [key: string]: Schema$GoogleCloudAiplatformV1beta1FeatureValue; + } | null; } /** - * Each SimilarityTakedownPhrase treats a logical group of blocked and allowed phrases together along with a corresponding punt If the closest matching response is of the allowed type, we allow the response If the closest matching response is of the blocked type, we block the response. eg: Blocked phrase - "All lives matter" + * Request message for FeaturestoreOnlineServingService.WriteFeatureValues. */ - export interface Schema$LearningGenaiRootSimilarityTakedownPhrase { - blockedPhrase?: string | null; - } - export interface Schema$LearningGenaiRootSimilarityTakedownResult { - /** - * False when query or response should be taken down by any of the takedown rules, true otherwise. - */ - allowed?: boolean | null; + export interface Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesRequest { /** - * List of similar phrases with score. Set only if allowed=false. + * Required. The entities to be written. Up to 100,000 feature values can be written across all `payloads`. */ - scoredPhrases?: Schema$LearningGenaiRootScoredSimilarityTakedownPhrase[]; + payloads?: Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesPayload[]; } - export interface Schema$LearningGenaiRootTakedownResult { + /** + * Response message for FeaturestoreOnlineServingService.WriteFeatureValues. 
+ */ + export interface Schema$GoogleCloudAiplatformV1beta1WriteFeatureValuesResponse {} + /** + * Request message for TensorboardService.WriteTensorboardExperimentData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataRequest { /** - * False when query or response should be taken down by any of the takedown rules, true otherwise. + * Required. Requests containing per-run TensorboardTimeSeries data to write. */ - allowed?: boolean | null; - regexTakedownResult?: Schema$LearningGenaiRootRegexTakedownResult; - requestResponseTakedownResult?: Schema$LearningGenaiRootRequestResponseTakedownResult; - similarityTakedownResult?: Schema$LearningGenaiRootSimilarityTakedownResult; + writeRunDataRequests?: Schema$GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest[]; } /** - * The type of score that bundled with a threshold, and will not be attending the final score calculation. How each score type uses the threshold can be implementation details. + * Response message for TensorboardService.WriteTensorboardExperimentData. */ - export interface Schema$LearningGenaiRootThresholdType { - scoreType?: string | null; - threshold?: number | null; - } + export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardExperimentDataResponse {} /** - * Results of RandomSamplingParams::top_k_logprob_per_decoding_step. + * Request message for TensorboardService.WriteTensorboardRunData. */ - export interface Schema$LearningGenaiRootTokensAndLogProbPerDecodingStep { + export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardRunDataRequest { /** - * Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. + * Required. The resource name of the TensorboardRun to write data to. 
Format: `projects/{project\}/locations/{location\}/tensorboards/{tensorboard\}/experiments/{experiment\}/runs/{run\}` */ - chosenCandidates?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[]; + tensorboardRun?: string | null; /** - * Length = total number of decoding steps. + * Required. The TensorboardTimeSeries data to write. Values with in a time series are indexed by their step value. Repeated writes to the same step will overwrite the existing value for that step. The upper limit of data points per write request is 5000. */ - topCandidates?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates[]; + timeSeriesData?: Schema$GoogleCloudAiplatformV1beta1TimeSeriesData[]; } /** - * A candidate at a decoding step. + * Response message for TensorboardService.WriteTensorboardRunData. + */ + export interface Schema$GoogleCloudAiplatformV1beta1WriteTensorboardRunDataResponse {} + /** + * An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. */ - export interface Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate { + export interface Schema$GoogleCloudAiplatformV1beta1XraiAttribution { /** - * The candidate's log probability. + * Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 */ - logProbability?: number | null; + blurBaselineConfig?: Schema$GoogleCloudAiplatformV1beta1BlurBaselineConfig; /** - * The candidate’s token value. + * Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. 
Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf */ - token?: string | null; - } - /** - * Candidates with top log probabilities at each decoding step. - */ - export interface Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepTopCandidates { + smoothGradConfig?: Schema$GoogleCloudAiplatformV1beta1SmoothGradConfig; /** - * Sorted by log probability in descending order. + * Required. The number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. Valid range of its value is [1, 100], inclusively. */ - candidates?: Schema$LearningGenaiRootTokensAndLogProbPerDecodingStepCandidate[]; - } - /** - * A model can generate multiple signals and this captures all the generated signals for a single message. - */ - export interface Schema$LearningGenaiRootToxicityResult { - signals?: Schema$LearningGenaiRootToxicitySignal[]; - } - /** - * Proto to capture a signal generated by the toxicity model. - */ - export interface Schema$LearningGenaiRootToxicitySignal { - allowed?: boolean | null; - label?: string | null; - score?: number | null; + stepCount?: number | null; } /** - * Each TranslationRequestInfo corresponds to a request sent to the translation server. + * The response message for Locations.ListLocations. */ - export interface Schema$LearningGenaiRootTranslationRequestInfo { + export interface Schema$GoogleCloudLocationListLocationsResponse { /** - * The ISO-639 language code of source text in the initial request, detected automatically, if no source language was passed within the initial request. If the source language was passed, auto-detection of the language does not occur and this field is empty. + * A list of locations that matches the specified filter in the request. 
*/ - detectedLanguageCodes?: string[] | null; + locations?: Schema$GoogleCloudLocationLocation[]; /** - * The sum of the size of all the contents in the request. + * The standard List next-page token. */ - totalContentSize?: string | null; - } - export interface Schema$LearningServingLlmAtlasOutputMetadata { - requestTopic?: string | null; - source?: string | null; + nextPageToken?: string | null; } /** - * LINT.IfChange This metadata contains additional information required for debugging. + * A resource that represents a Google Cloud location. */ - export interface Schema$LearningServingLlmMessageMetadata { - atlasMetadata?: Schema$LearningServingLlmAtlasOutputMetadata; + export interface Schema$GoogleCloudLocationLocation { /** - * Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not. + * The friendly name for this location, typically a nearby city name. For example, "Tokyo". */ - classifierSummary?: Schema$LearningGenaiRootClassifierOutputSummary; + displayName?: string | null; /** - * Contains metadata related to Codey Processors. + * Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"\} */ - codeyOutput?: Schema$LearningGenaiRootCodeyOutput; - currentStreamTextLength?: number | null; + labels?: {[key: string]: string} | null; /** - * Whether the corresponding message has been deleted. + * The canonical id for this location. For example: `"us-east1"`. */ - deleted?: boolean | null; + locationId?: string | null; /** - * Metadata for filters that triggered. + * Service-specific metadata. For example the available capacity at the given location. */ - filterMeta?: Schema$LearningGenaiRootFilterMetadata[]; + metadata?: {[key: string]: any} | null; /** - * This score is finally used for ranking the message. This will be same as the score present in `Message.score` field. + * Resource name for the location, which may vary between implementations. 
For example: `"projects/example-project/locations/us-east1"` */ - finalMessageScore?: Schema$LearningGenaiRootScore; + name?: string | null; + } + /** + * Associates `members`, or principals, with a `role`. + */ + export interface Schema$GoogleIamV1Binding { /** - * NOT YET IMPLEMENTED. + * The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ - finishReason?: string | null; - groundingMetadata?: Schema$LearningGenaiRootGroundingMetadata; + condition?: Schema$GoogleTypeExpr; /** - * Applies to streaming response message only. Whether the message is a code. + * Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid\}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid\}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid\}.svc.id.goog[{namespace\}/{kubernetes-sa\}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). 
For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid\}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain\}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/group/{group_id\}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/x`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/group/{group_id\}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/x`: All identities in a workload identity pool. * `deleted:user:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. 
If the user is recovered, this value reverts to `user:{emailid\}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid\}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid\}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`. */ - isCode?: boolean | null; + members?: string[] | null; /** - * Applies to Response message only. Indicates whether the message is a fallback and the response would have otherwise been empty. + * Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles). */ - isFallback?: boolean | null; + role?: string | null; + } + /** + * Request message for `GetIamPolicy` method. + */ + export interface Schema$GoogleIamV1GetIamPolicyRequest { /** - * Result from nlp_saft DetectLanguage method. 
Currently the predicted language code and language probability is used. + * OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`. */ - langidResult?: Schema$NlpSaftLangIdResult; + options?: Schema$GoogleIamV1GetPolicyOptions; + } + /** + * Encapsulates settings provided to GetIamPolicy. + */ + export interface Schema$GoogleIamV1GetPolicyOptions { /** - * Detected language. + * Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ - language?: string | null; + requestedPolicyVersion?: number | null; + } + /** + * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] \}, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", \} \} ], "etag": "BwWWja0YfJA=", "version": 3 \} ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). + */ + export interface Schema$GoogleIamV1Policy { /** - * The LM prefix used to generate this response. + * Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. 
For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`. */ - lmPrefix?: string | null; + bindings?: Schema$GoogleIamV1Binding[]; /** - * FOR LMROOT INTERNAL USE ONLY. Externally, use learning.genai.root.RequestMetadata.RequestMetrics. Request metrics per modality including token count, duration, num_frames. + * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. */ - lmrootInternalRequestMetrics?: Schema$LearningGenaiRootRequestMetrics; + etag?: string | null; /** - * Multi modal recitation results. It will be populated as long as Multi modal Recitation processor is invoked. + * Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. 
This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ - mmRecitationResult?: Schema$LearningGenaiRecitationMMRecitationCheckResult; + version?: number | null; + } + /** + * Request message for `SetIamPolicy` method. + */ + export interface Schema$GoogleIamV1SetIamPolicyRequest { /** - * Number of Controlled Decoding rewind and repeats that have happened for this response. + * REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them. */ - numRewinds?: number | null; + policy?: Schema$GoogleIamV1Policy; + } + /** + * Request message for `TestIamPermissions` method. + */ + export interface Schema$GoogleIamV1TestIamPermissionsRequest { /** - * The original text generated by LLM. This is the raw output for debugging purposes. + * The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). 
*/ - originalText?: string | null; + permissions?: string[] | null; + } + /** + * Response message for `TestIamPermissions` method. + */ + export interface Schema$GoogleIamV1TestIamPermissionsResponse { /** - * Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only. + * A subset of `TestPermissionsRequest.permissions` that the caller is allowed. */ - perStreamDecodedTokenCount?: number | null; + permissions?: string[] | null; + } + /** + * The response message for Operations.ListOperations. + */ + export interface Schema$GoogleLongrunningListOperationsResponse { /** - * Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only. + * The standard List next-page token. */ - perStreamReturnedTokenCount?: number | null; + nextPageToken?: string | null; /** - * Results of running RAI on the query or this response candidate. One output per rai_config. It will be populated regardless of whether the threshold is exceeded or not. + * A list of operations that matches the specified filter in the request. */ - raiOutputs?: Schema$LearningGenaiRootRAIOutput[]; + operations?: Schema$GoogleLongrunningOperation[]; + } + /** + * This resource represents a long-running operation that is the result of a network API call. + */ + export interface Schema$GoogleLongrunningOperation { /** - * Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome. + * If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. 
*/ - recitationResult?: Schema$LearningGenaiRecitationRecitationResult; + done?: boolean | null; /** - * All the different scores for a message are logged here. + * The error result of the operation in case of failure or cancellation. */ - scores?: Schema$LearningGenaiRootScore[]; + error?: Schema$GoogleRpcStatus; /** - * Whether the response is terminated during streaming return. Only used for streaming requests. + * Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. */ - streamTerminated?: boolean | null; + metadata?: {[key: string]: any} | null; /** - * Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate. + * The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id\}`. */ - totalDecodedTokenCount?: number | null; + name?: string | null; /** - * Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only. + * The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. 
*/ - totalReturnedTokenCount?: number | null; + response?: {[key: string]: any} | null; + } + /** + * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \} + */ + export interface Schema$GoogleProtobufEmpty {} + /** + * The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). + */ + export interface Schema$GoogleRpcStatus { /** - * Translated user-prompt used for RAI post processing. This is for internal processing only. We will translate in pre-processor and pass the translated text to the post processor using this field. It will be empty if non of the signals requested need translation. + * The status code, which should be an enum value of google.rpc.Code. */ - translatedUserPrompts?: string[] | null; + code?: number | null; /** - * The metadata from Vertex SafetyCat processors + * A list of messages that carry the error details. There is a common set of message types for APIs to use. */ - vertexRaiResult?: Schema$CloudAiNlLlmProtoServiceRaiResult; - } - export interface Schema$NlpSaftLangIdLocalesResult { + details?: Array<{[key: string]: any}> | null; /** - * List of locales in which the text would be considered acceptable. Sorted in descending order according to each locale's respective likelihood. 
For example, if a Portuguese text is acceptable in both Brazil and Portugal, but is more strongly associated with Brazil, then the predictions would be ["pt-BR", "pt-PT"], in that order. May be empty, indicating that the model did not predict any acceptable locales. + * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ - predictions?: Schema$NlpSaftLangIdLocalesResultLocale[]; + message?: string | null; } - export interface Schema$NlpSaftLangIdLocalesResultLocale { + /** + * Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? 
protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); \} public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); \} return resultBuilder.build(); \} // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; \} return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; \} static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; \} Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; \} [result autorelease]; return result; \} // ... Example (JavaScript): // ... 
var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); \} var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); \}; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); \} resultBuilder.push(hexString); return resultBuilder.join(''); \}; // ... + */ + export interface Schema$GoogleTypeColor { /** - * A BCP 47 language code that includes region information. For example, "pt-BR" or "pt-PT". This field will always be populated. + * The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). */ - languageCode?: string | null; - } - export interface Schema$NlpSaftLangIdResult { + alpha?: number | null; /** - * The version of the model used to create these annotations. + * The amount of blue in the color as a value in the interval [0, 1]. 
*/ - modelVersion?: string | null; + blue?: number | null; /** - * This field stores the n-best list of possible BCP 47 language code strings for a given input sorted in descending order according to each code's respective probability. + * The amount of green in the color as a value in the interval [0, 1]. */ - predictions?: Schema$NlpSaftLanguageSpan[]; + green?: number | null; /** - * This field stores language predictions of subspans of the input, when available. Each LanguageSpanSequence is a sequence of LanguageSpans. A particular sequence of LanguageSpans has an associated probability, and need not necessarily cover the entire input. If no language could be predicted for any span, then this field may be empty. + * The amount of red in the color as a value in the interval [0, 1]. */ - spanPredictions?: Schema$NlpSaftLanguageSpanSequence[]; + red?: number | null; } - export interface Schema$NlpSaftLanguageSpan { - end?: number | null; + /** + * Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp + */ + export interface Schema$GoogleTypeDate { + /** + * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. + */ + day?: number | null; /** - * A BCP 47 language code for this span. + * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. 
*/ - languageCode?: string | null; + month?: number | null; /** - * Optional field containing any information that was predicted about the specific locale(s) of the span. + * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ - locales?: Schema$NlpSaftLangIdLocalesResult; + year?: number | null; + } + /** + * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. + */ + export interface Schema$GoogleTypeExpr { /** - * A probability associated with this prediction. + * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ - probability?: number | null; + description?: string | null; /** - * Start and end byte offsets, inclusive, within the given input string. A value of -1 implies that this field is not set. Both fields must either be set with a nonnegative value or both are unset. 
If both are unset then this LanguageSpan applies to the entire input. + * Textual representation of an expression in Common Expression Language syntax. */ - start?: number | null; - } - export interface Schema$NlpSaftLanguageSpanSequence { + expression?: string | null; /** - * A sequence of LanguageSpan objects, each assigning a language to a subspan of the input. + * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ - languageSpans?: Schema$NlpSaftLanguageSpan[]; + location?: string | null; /** - * The probability of this sequence of LanguageSpans. + * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ - probability?: number | null; + title?: string | null; } /** - * This is proto2's version of MessageSet. - */ - export interface Schema$Proto2BridgeMessageSet {} - /** - * Wire-format for a Status object + * Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time. */ - export interface Schema$UtilStatusProto { + export interface Schema$GoogleTypeInterval { /** - * The canonical error code (see codes.proto) that most closely corresponds to this status. This may be missing, and in the common case of the generic space, it definitely will be. + * Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end. */ - canonicalCode?: number | null; + endTime?: string | null; /** - * Numeric code drawn from the space specified below. Often, this is the canonical error space, and code is drawn from google3/util/task/codes.proto + * Optional. Inclusive start of the interval. 
If specified, a Timestamp matching this interval will have to be the same or after the start. */ - code?: number | null; + startTime?: string | null; + } + /** + * Represents an amount of money with its currency type. + */ + export interface Schema$GoogleTypeMoney { /** - * Detail message + * The three-letter currency code defined in ISO 4217. */ - message?: string | null; + currencyCode?: string | null; /** - * message_set associates an arbitrary proto message with the status. + * Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. */ - messageSet?: Schema$Proto2BridgeMessageSet; + nanos?: number | null; /** - * The following are usually only present when code != 0 Space to which this status belongs + * The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. 
*/ - space?: string | null; + units?: string | null; } export class Resource$Media { From 3f14227af4de4c437a50d59a797b626210b5059e Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:43 +0000 Subject: [PATCH 2/7] feat(container): update the API #### container:v1 The following keys were added: - schemas.Cluster.properties.satisfiesPzi.description - schemas.Cluster.properties.satisfiesPzi.readOnly - schemas.Cluster.properties.satisfiesPzi.type - schemas.Cluster.properties.satisfiesPzs.description - schemas.Cluster.properties.satisfiesPzs.readOnly - schemas.Cluster.properties.satisfiesPzs.type - schemas.ClusterUpdate.properties.desiredNodeKubeletConfig.$ref - schemas.ClusterUpdate.properties.desiredNodeKubeletConfig.description - schemas.ClusterUpdate.properties.desiredNodePoolAutoConfigKubeletConfig.$ref - schemas.ClusterUpdate.properties.desiredNodePoolAutoConfigKubeletConfig.description - schemas.NodeConfigDefaults.properties.nodeKubeletConfig.$ref - schemas.NodeConfigDefaults.properties.nodeKubeletConfig.description - schemas.NodePoolAutoConfig.properties.nodeKubeletConfig.$ref - schemas.NodePoolAutoConfig.properties.nodeKubeletConfig.description --- discovery/container-v1.json | 28 +++++++++++++++++++++++++++- src/apis/container/v1.ts | 24 ++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/discovery/container-v1.json b/discovery/container-v1.json index a11ade1b28..3fb14156d8 100644 --- a/discovery/container-v1.json +++ b/discovery/container-v1.json @@ -2540,7 +2540,7 @@ } } }, - "revision": "20240409", + "revision": "20240422", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3369,6 +3369,16 @@ "$ref": "ResourceUsageExportConfig", "description": "Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified." }, + "satisfiesPzi": { + "description": "Output only. 
Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, + "satisfiesPzs": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "securityPostureConfig": { "$ref": "SecurityPostureConfig", "description": "Enable/Disable Security Posture API features for the cluster." @@ -3661,6 +3671,14 @@ "$ref": "ClusterNetworkPerformanceConfig", "description": "The desired network performance config." }, + "desiredNodeKubeletConfig": { + "$ref": "NodeKubeletConfig", + "description": "The desired node kubelet config for the cluster." + }, + "desiredNodePoolAutoConfigKubeletConfig": { + "$ref": "NodeKubeletConfig", + "description": "The desired node kubelet config for all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters." + }, "desiredNodePoolAutoConfigNetworkTags": { "$ref": "NetworkTags", "description": "The desired network tags that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters." @@ -5463,6 +5481,10 @@ "loggingConfig": { "$ref": "NodePoolLoggingConfig", "description": "Logging configuration for node pools." + }, + "nodeKubeletConfig": { + "$ref": "NodeKubeletConfig", + "description": "NodeKubeletConfig controls the defaults for new node-pools. Currently only `insecure_kubelet_readonly_port_enabled` can be set here." } }, "type": "object" @@ -5709,6 +5731,10 @@ "$ref": "NetworkTags", "description": "The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during cluster creation. Each tag within the list must comply with RFC1035." }, + "nodeKubeletConfig": { + "$ref": "NodeKubeletConfig", + "description": "NodeKubeletConfig controls the defaults for autoprovisioned node-pools. Currently only `insecure_kubelet_readonly_port_enabled` can be set here." 
+ }, "resourceManagerTags": { "$ref": "ResourceManagerTags", "description": "Resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies." diff --git a/src/apis/container/v1.ts b/src/apis/container/v1.ts index b1f318479f..7be0c5cdf0 100644 --- a/src/apis/container/v1.ts +++ b/src/apis/container/v1.ts @@ -779,6 +779,14 @@ export namespace container_v1 { * Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified. */ resourceUsageExportConfig?: Schema$ResourceUsageExportConfig; + /** + * Output only. Reserved for future use. + */ + satisfiesPzi?: boolean | null; + /** + * Output only. Reserved for future use. + */ + satisfiesPzs?: boolean | null; /** * Enable/Disable Security Posture API features for the cluster. */ @@ -998,6 +1006,14 @@ export namespace container_v1 { * The desired network performance config. */ desiredNetworkPerformanceConfig?: Schema$ClusterNetworkPerformanceConfig; + /** + * The desired node kubelet config for the cluster. + */ + desiredNodeKubeletConfig?: Schema$NodeKubeletConfig; + /** + * The desired node kubelet config for all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters. + */ + desiredNodePoolAutoConfigKubeletConfig?: Schema$NodeKubeletConfig; /** * The desired network tags that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters. */ @@ -2255,6 +2271,10 @@ export namespace container_v1 { * Logging configuration for node pools. */ loggingConfig?: Schema$NodePoolLoggingConfig; + /** + * NodeKubeletConfig controls the defaults for new node-pools. Currently only `insecure_kubelet_readonly_port_enabled` can be set here. + */ + nodeKubeletConfig?: Schema$NodeKubeletConfig; } /** * Node kubelet configs. @@ -2445,6 +2465,10 @@ export namespace container_v1 { * The list of instance tags applied to all nodes. 
Tags are used to identify valid sources or targets for network firewalls and are specified by the client during cluster creation. Each tag within the list must comply with RFC1035. */ networkTags?: Schema$NetworkTags; + /** + * NodeKubeletConfig controls the defaults for autoprovisioned node-pools. Currently only `insecure_kubelet_readonly_port_enabled` can be set here. + */ + nodeKubeletConfig?: Schema$NodeKubeletConfig; /** * Resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. */ From 0bac1b265d633a84544c39af22c451fdaf012e62 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:43 +0000 Subject: [PATCH 3/7] feat(logging): update the API #### logging:v2 The following keys were added: - schemas.LogMetric.properties.resourceName.description - schemas.LogMetric.properties.resourceName.readOnly - schemas.LogMetric.properties.resourceName.type --- discovery/logging-v2.json | 7 ++++++- src/apis/logging/v2.ts | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/discovery/logging-v2.json b/discovery/logging-v2.json index b27e022ba6..0817d8aad2 100644 --- a/discovery/logging-v2.json +++ b/discovery/logging-v2.json @@ -8132,7 +8132,7 @@ } } }, - "revision": "20240426", + "revision": "20240503", "rootUrl": "https://logging.googleapis.com/", "schemas": { "AuditConfig": { @@ -9576,6 +9576,11 @@ "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.This field is the [METRIC_ID] part of a metric resource name in the format \"projects/PROJECT_ID/metrics/METRIC_ID\". 
Example: If the resource name of a metric is \"projects/my-project/metrics/nginx%2Frequests\", this field's value is \"nginx/requests\".", "type": "string" }, + "resourceName": { + "description": "Output only. The resource name of the metric: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" ", + "readOnly": true, + "type": "string" + }, "updateTime": { "description": "Output only. The last update timestamp of the metric.This field may not be present for older metrics.", "format": "google-datetime", diff --git a/src/apis/logging/v2.ts b/src/apis/logging/v2.ts index c32edac146..56af8d8925 100644 --- a/src/apis/logging/v2.ts +++ b/src/apis/logging/v2.ts @@ -1171,6 +1171,10 @@ export namespace logging_v2 { * Required. The client-assigned metric identifier. Examples: "error_count", "nginx/requests".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.This field is the [METRIC_ID] part of a metric resource name in the format "projects/PROJECT_ID/metrics/METRIC_ID". Example: If the resource name of a metric is "projects/my-project/metrics/nginx%2Frequests", this field's value is "nginx/requests". */ name?: string | null; + /** + * Output only. The resource name of the metric: "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + */ + resourceName?: string | null; /** * Output only. The last update timestamp of the metric.This field may not be present for older metrics. */ From 32bb4f618464e708bc3d470ab1411a57327dd1cf Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:44 +0000 Subject: [PATCH 4/7] feat(networksecurity)!: update the API BREAKING CHANGE: This release has breaking changes. 
#### networksecurity:v1beta1 The following keys were deleted: - schemas.AddressGroup.properties.purpose.description - schemas.AddressGroup.properties.purpose.items.enum - schemas.AddressGroup.properties.purpose.items.enumDescriptions - schemas.AddressGroup.properties.purpose.items.type - schemas.AddressGroup.properties.purpose.type The following keys were changed: - schemas.SecurityProfile.description - schemas.SecurityProfileGroup.description #### networksecurity:v1 The following keys were changed: - schemas.SecurityProfile.description - schemas.SecurityProfileGroup.description --- discovery/networksecurity-v1.json | 6 +++--- discovery/networksecurity-v1beta1.json | 23 +++-------------------- src/apis/networksecurity/v1.ts | 4 ++-- src/apis/networksecurity/v1beta1.ts | 8 ++------ 4 files changed, 10 insertions(+), 31 deletions(-) diff --git a/discovery/networksecurity-v1.json b/discovery/networksecurity-v1.json index d19da05683..ad58709553 100644 --- a/discovery/networksecurity-v1.json +++ b/discovery/networksecurity-v1.json @@ -3162,7 +3162,7 @@ } } }, - "revision": "20240306", + "revision": "20240505", "rootUrl": "https://networksecurity.googleapis.com/", "schemas": { "AddAddressGroupItemsRequest": { @@ -4408,7 +4408,7 @@ "type": "object" }, "SecurityProfile": { - "description": "SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 9", + "description": "SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 10", "id": "SecurityProfile", "properties": { "createTime": { @@ -4463,7 +4463,7 @@ "type": "object" }, "SecurityProfileGroup": { - "description": "SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. Next ID: 8", + "description": "SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. 
Next ID: 9", "id": "SecurityProfileGroup", "properties": { "createTime": { diff --git a/discovery/networksecurity-v1beta1.json b/discovery/networksecurity-v1beta1.json index 8bd5285c67..7d5404f4f9 100644 --- a/discovery/networksecurity-v1beta1.json +++ b/discovery/networksecurity-v1beta1.json @@ -3162,7 +3162,7 @@ } } }, - "revision": "20240505", + "revision": "20240320", "rootUrl": "https://networksecurity.googleapis.com/", "schemas": { "AddAddressGroupItemsRequest": { @@ -3220,23 +3220,6 @@ "description": "Required. Name of the AddressGroup resource. It matches pattern `projects/*/locations/{location}/addressGroups/`.", "type": "string" }, - "purpose": { - "description": "Optional. List of supported purposes of the Address Group.", - "items": { - "enum": [ - "PURPOSE_UNSPECIFIED", - "DEFAULT", - "CLOUD_ARMOR" - ], - "enumDescriptions": [ - "Default value. Should never happen.", - "Address Group is distributed to VMC, and is usable in Firewall Policies and other systems that rely on VMC.", - "Address Group is usable in Cloud Armor." - ], - "type": "string" - }, - "type": "array" - }, "selfLink": { "description": "Output only. Server-defined fully-qualified URL for this resource.", "readOnly": true, @@ -4425,7 +4408,7 @@ "type": "object" }, "SecurityProfile": { - "description": "SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 10", + "description": "SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 9", "id": "SecurityProfile", "properties": { "createTime": { @@ -4480,7 +4463,7 @@ "type": "object" }, "SecurityProfileGroup": { - "description": "SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. Next ID: 9", + "description": "SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. 
Next ID: 8", "id": "SecurityProfileGroup", "properties": { "createTime": { diff --git a/src/apis/networksecurity/v1.ts b/src/apis/networksecurity/v1.ts index 277123d9e3..9efb09e6ff 100644 --- a/src/apis/networksecurity/v1.ts +++ b/src/apis/networksecurity/v1.ts @@ -988,7 +988,7 @@ export namespace networksecurity_v1 { sources?: Schema$Source[]; } /** - * SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 9 + * SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 10 */ export interface Schema$SecurityProfile { /** @@ -1025,7 +1025,7 @@ export namespace networksecurity_v1 { updateTime?: string | null; } /** - * SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. Next ID: 8 + * SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. Next ID: 9 */ export interface Schema$SecurityProfileGroup { /** diff --git a/src/apis/networksecurity/v1beta1.ts b/src/apis/networksecurity/v1beta1.ts index d9db37e525..a2d77455a7 100644 --- a/src/apis/networksecurity/v1beta1.ts +++ b/src/apis/networksecurity/v1beta1.ts @@ -167,10 +167,6 @@ export namespace networksecurity_v1beta1 { * Required. Name of the AddressGroup resource. It matches pattern `projects/x/locations/{location\}/addressGroups/`. */ name?: string | null; - /** - * Optional. List of supported purposes of the Address Group. - */ - purpose?: string[] | null; /** * Output only. Server-defined fully-qualified URL for this resource. */ @@ -992,7 +988,7 @@ export namespace networksecurity_v1beta1 { sources?: Schema$Source[]; } /** - * SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. Next ID: 10 + * SecurityProfile is a resource that defines the behavior for one of many ProfileTypes. 
Next ID: 9 */ export interface Schema$SecurityProfile { /** @@ -1029,7 +1025,7 @@ export namespace networksecurity_v1beta1 { updateTime?: string | null; } /** - * SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. Next ID: 9 + * SecurityProfileGroup is a resource that defines the behavior for various ProfileTypes. Next ID: 8 */ export interface Schema$SecurityProfileGroup { /** From 121af4a2062f4e549fb29d33a9559f12aa0b162b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:44 +0000 Subject: [PATCH 5/7] fix(networkservices): update the API #### networkservices:v1beta1 The following keys were changed: - schemas.Gateway.description #### networkservices:v1 The following keys were changed: - schemas.Gateway.description --- discovery/networkservices-v1.json | 4 ++-- discovery/networkservices-v1beta1.json | 4 ++-- src/apis/networkservices/v1.ts | 2 +- src/apis/networkservices/v1beta1.ts | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/discovery/networkservices-v1.json b/discovery/networkservices-v1.json index f53a39800b..e50b487f4d 100644 --- a/discovery/networkservices-v1.json +++ b/discovery/networkservices-v1.json @@ -2756,7 +2756,7 @@ } } }, - "revision": "20240502", + "revision": "20240415", "rootUrl": "https://networkservices.googleapis.com/", "schemas": { "AuditConfig": { @@ -3080,7 +3080,7 @@ "type": "object" }, "Gateway": { - "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 33", + "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. 
Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 32", "id": "Gateway", "properties": { "addresses": { diff --git a/discovery/networkservices-v1beta1.json b/discovery/networkservices-v1beta1.json index 074464b937..b17a88d497 100644 --- a/discovery/networkservices-v1beta1.json +++ b/discovery/networkservices-v1beta1.json @@ -2483,7 +2483,7 @@ } } }, - "revision": "20240415", + "revision": "20240506", "rootUrl": "https://networkservices.googleapis.com/", "schemas": { "AuditConfig": { @@ -2764,7 +2764,7 @@ "type": "object" }, "Gateway": { - "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 32", + "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 33", "id": "Gateway", "properties": { "addresses": { diff --git a/src/apis/networkservices/v1.ts b/src/apis/networkservices/v1.ts index ecbb6b8214..11cf8d8882 100644 --- a/src/apis/networkservices/v1.ts +++ b/src/apis/networkservices/v1.ts @@ -340,7 +340,7 @@ export namespace networkservices_v1 { celExpression?: string | null; } /** - * Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 33 + * Gateway represents the configuration for a proxy, typically a load balancer. 
It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 32 */ export interface Schema$Gateway { /** diff --git a/src/apis/networkservices/v1beta1.ts b/src/apis/networkservices/v1beta1.ts index 298b9885c3..8169e026be 100644 --- a/src/apis/networkservices/v1beta1.ts +++ b/src/apis/networkservices/v1beta1.ts @@ -314,7 +314,7 @@ export namespace networkservices_v1beta1 { celExpression?: string | null; } /** - * Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 32 + * Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. 
Next id: 33 */ export interface Schema$Gateway { /** From e1dfd88edcfb342c7458e208c1580641ce4934d6 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:44 +0000 Subject: [PATCH 6/7] fix(run): update the API #### run:v2 The following keys were changed: - schemas.GoogleDevtoolsCloudbuildV1Results.properties.buildStepOutputs.description --- discovery/run-v2.json | 4 ++-- src/apis/run/v2.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/discovery/run-v2.json b/discovery/run-v2.json index 306572f8aa..59eaecaf51 100644 --- a/discovery/run-v2.json +++ b/discovery/run-v2.json @@ -1469,7 +1469,7 @@ } } }, - "revision": "20240426", + "revision": "20240503", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -4681,7 +4681,7 @@ "type": "array" }, "buildStepOutputs": { - "description": "List of build step outputs, produced by builder images, in the order corresponding to build step indices. [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders) can produce this output by writing to `$BUILDER_OUTPUT/output`. Only the first 50KB of data is stored.", + "description": "List of build step outputs, produced by builder images, in the order corresponding to build step indices. [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders) can produce this output by writing to `$BUILDER_OUTPUT/output`. Only the first 50KB of data is stored. Note that the `$BUILDER_OUTPUT` variable is read-only and can't be substituted.", "items": { "format": "byte", "type": "string" diff --git a/src/apis/run/v2.ts b/src/apis/run/v2.ts index dadee4d780..5b683dd66e 100644 --- a/src/apis/run/v2.ts +++ b/src/apis/run/v2.ts @@ -2214,7 +2214,7 @@ export namespace run_v2 { */ buildStepImages?: string[] | null; /** - * List of build step outputs, produced by builder images, in the order corresponding to build step indices. 
[Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders) can produce this output by writing to `$BUILDER_OUTPUT/output`. Only the first 50KB of data is stored. + * List of build step outputs, produced by builder images, in the order corresponding to build step indices. [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders) can produce this output by writing to `$BUILDER_OUTPUT/output`. Only the first 50KB of data is stored. Note that the `$BUILDER_OUTPUT` variable is read-only and can't be substituted. */ buildStepOutputs?: string[] | null; /** From 7a105041782ccfce99b2c6840d9e5de833b62844 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Fri, 10 May 2024 17:24:44 +0000 Subject: [PATCH 7/7] feat: regenerate index files --- discovery/monitoring-v1.json | 101 ++++++++++++++++++++++++++++++++++- discovery/pubsub-v1.json | 8 +-- 2 files changed, 105 insertions(+), 4 deletions(-) diff --git a/discovery/monitoring-v1.json b/discovery/monitoring-v1.json index 01510242b7..e99048d4e7 100644 --- a/discovery/monitoring-v1.json +++ b/discovery/monitoring-v1.json @@ -753,7 +753,7 @@ } } }, - "revision": "20240427", + "revision": "20240505", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -1037,6 +1037,10 @@ "description": "A Google Stackdriver dashboard. Dashboards define the content and layout of pages in the Stackdriver web application.", "id": "Dashboard", "properties": { + "annotations": { + "$ref": "DashboardAnnotations", + "description": "Configuration for event annotations to display on this dashboard." + }, "columnLayout": { "$ref": "ColumnLayout", "description": "The content is divided into equally spaced columns and the widgets are arranged vertically." 
@@ -1082,6 +1086,27 @@ }, "type": "object" }, + "DashboardAnnotations": { + "description": "Dashboard-level configuration for annotations", + "id": "DashboardAnnotations", + "properties": { + "defaultResourceNames": { + "description": "Dashboard level defaults for names of logging resources to search for events. Currently only projects are supported. Each individual EventAnnotation may have its own overrides. If both this field and the per annotation field is empty, then the scoping project is used. Limit: 50 projects. For example: “projects/some-project-id” ", + "items": { + "type": "string" + }, + "type": "array" + }, + "eventAnnotations": { + "description": "List of annotation configurations for this dashboard. Each entry specifies one event type.", + "items": { + "$ref": "EventAnnotation" + }, + "type": "array" + } + }, + "type": "object" + }, "DashboardFilter": { "description": "A filter to reduce the amount of data charted in relevant widgets.", "id": "DashboardFilter", @@ -1301,6 +1326,80 @@ }, "type": "object" }, + "EventAnnotation": { + "description": "Annotation configuration for one event type on a dashboard", + "id": "EventAnnotation", + "properties": { + "displayName": { + "description": "Solely for UI display. 
Should not be used programmatically.", + "type": "string" + }, + "enabled": { + "description": "Whether or not to show the events on the dashboard by default", + "type": "boolean" + }, + "eventType": { + "description": "The type of event to display.", + "enum": [ + "EVENT_TYPE_UNSPECIFIED", + "GKE_WORKLOAD_DEPLOYMENT", + "GKE_POD_CRASH", + "GKE_POD_UNSCHEDULABLE", + "GKE_CONTAINER_CREATION_FAILED", + "GKE_CLUSTER_CREATE_DELETE", + "GKE_CLUSTER_UPDATE", + "GKE_NODE_POOL_UPDATE", + "GKE_CLUSTER_AUTOSCALER", + "GKE_POD_AUTOSCALER", + "VM_TERMINATION", + "VM_GUEST_OS_ERROR", + "VM_START_FAILED", + "MIG_UPDATE", + "MIG_AUTOSCALER", + "CLOUD_RUN_DEPLOYMENT", + "CLOUD_SQL_FAILOVER", + "CLOUD_SQL_START_STOP", + "CLOUD_SQL_STORAGE", + "UPTIME_CHECK_FAILURE" + ], + "enumDescriptions": [ + "No event type specified.", + "Patch/update of GKE workload.", + "Crash events for a GKE Pod.", + "Scheduling failures for GKE Pods.", + "Failure to create a GKE container.", + "Create/delete of a GKE cluster.", + "Update of a GKE cluster.", + "Update of a GKE node pool.", + "GKE cluster autoscaler event.", + "GKE pod autoscaler event.", + "Termination of a virtual machine.", + "Guest OS error on a virtual machine.", + "Start failure on a virtual machine.", + "Update of a managed instance group.", + "Autoscaler event for a managed instance group.", + "New deployment of a Cloud Run service.", + "Failover of a Cloud SQL instance.", + "Start/stop of a Cloud SQL instance.", + "Storage event for a Cloud SQL instance.", + "Failure of a Cloud Monitoring uptime check." + ], + "type": "string" + }, + "filter": { + "description": "string filtering the events - event dependant. Example values: \"resource.labels.pod_name = 'pod-1'\" \"protoPayload.authenticationInfo.principalEmail='user@example.com'\" ", + "type": "string" + }, + "resourceNames": { + "description": "Per annotation level override for the names of logging resources to search for events. Currently only projects are supported. 
If both this field and the per annotation field is empty, it will default to the host project. Limit: 50 projects. For example: “projects/another-project-id” ", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Field": { "description": "A single field of a message type.", "id": "Field", diff --git a/discovery/pubsub-v1.json b/discovery/pubsub-v1.json index 0604aaba11..29468ecb55 100644 --- a/discovery/pubsub-v1.json +++ b/discovery/pubsub-v1.json @@ -1583,7 +1583,7 @@ } } }, - "revision": "20240416", + "revision": "20240430", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { @@ -1774,14 +1774,16 @@ "ACTIVE", "PERMISSION_DENIED", "NOT_FOUND", - "IN_TRANSIT_LOCATION_RESTRICTION" + "IN_TRANSIT_LOCATION_RESTRICTION", + "SCHEMA_MISMATCH" ], "enumDescriptions": [ "Default value. This value is unused.", "The subscription can actively send messages to Cloud Storage.", "Cannot write to the Cloud Storage bucket because of permission denied errors.", "Cannot write to the Cloud Storage bucket because it does not exist.", - "Cannot write to the destination because enforce_in_transit is set to true and the destination locations are not in the allowed regions." + "Cannot write to the destination because enforce_in_transit is set to true and the destination locations are not in the allowed regions.", + "Cannot write to the Cloud Storage bucket due to an incompatibility between the topic schema and subscription settings." ], "readOnly": true, "type": "string"