diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/CHANGELOG.md b/sdk/resourcemanager/machinelearning/armmachinelearning/CHANGELOG.md index 3afb25d50b12..564a47c406ff 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/CHANGELOG.md +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/CHANGELOG.md @@ -1,5 +1,614 @@ # Release History +## 4.0.0-beta.1 (2023-06-14) +### Breaking Changes + +- Function `*WorkspaceConnectionsClient.Create` parameter(s) have been changed from `(context.Context, string, string, string, WorkspaceConnectionPropertiesV2BasicResource, *WorkspaceConnectionsClientCreateOptions)` to `(context.Context, string, string, string, *WorkspaceConnectionsClientCreateOptions)` +- Type of `AmlOperation.Display` has been changed from `*AmlOperationDisplay` to `*OperationDisplay` +- Type of `ComputeStartStopSchedule.Cron` has been changed from `*CronTrigger` to `*Cron` +- Type of `ComputeStartStopSchedule.Recurrence` has been changed from `*RecurrenceTrigger` to `*Recurrence` +- Type of `EncryptionProperty.KeyVaultProperties` has been changed from `*EncryptionKeyVaultProperties` to `*KeyVaultProperties` +- Type of `ExternalFQDNResponse.Value` has been changed from `[]*FQDNEndpoints` to `[]*FQDNEndpointsPropertyBag` +- Type of `PrivateEndpointConnectionProperties.PrivateEndpoint` has been changed from `*PrivateEndpoint` to `*WorkspacePrivateEndpointResource` +- Type of `PrivateLinkServiceConnectionState.Status` has been changed from `*PrivateEndpointServiceConnectionStatus` to `*EndpointServiceConnectionStatus` +- Type of `SharedPrivateLinkResourceProperty.Status` has been changed from `*PrivateEndpointServiceConnectionStatus` to `*EndpointServiceConnectionStatus` +- Type of `WorkspaceProperties.PublicNetworkAccess` has been changed from `*PublicNetworkAccess` to `*PublicNetworkAccessType` +- Type of `WorkspacePropertiesUpdateParameters.PublicNetworkAccess` has been changed from `*PublicNetworkAccess` to `*PublicNetworkAccessType` +- Enum `PrivateEndpointServiceConnectionStatus` has been removed +- Enum `PublicNetworkAccess` has been removed +- Enum `ValueFormat` has been removed +- Operation `*PrivateLinkResourcesClient.List` has supported pagination, use `*PrivateLinkResourcesClient.NewListPager` instead. 
+- Struct `AmlOperationDisplay` has been removed +- Struct `EncryptionKeyVaultProperties` has been removed +- Struct `FQDNEndpointsProperties` has been removed +- Field `Properties` of struct `FQDNEndpoints` has been removed +- Field `UserStorageResourceID` of struct `ListWorkspaceKeysResult` has been removed +- Field `Value`, `ValueFormat` of struct `ManagedIdentityAuthTypeWorkspaceConnectionProperties` has been removed +- Field `Value`, `ValueFormat` of struct `NoneAuthTypeWorkspaceConnectionProperties` has been removed +- Field `Value`, `ValueFormat` of struct `PATAuthTypeWorkspaceConnectionProperties` has been removed +- Field `SubnetArmID` of struct `PrivateEndpoint` has been removed +- Field `Value`, `ValueFormat` of struct `SASAuthTypeWorkspaceConnectionProperties` has been removed +- Field `Value`, `ValueFormat` of struct `UsernamePasswordAuthTypeWorkspaceConnectionProperties` has been removed +- Field `Value`, `ValueFormat` of struct `WorkspaceConnectionPropertiesV2` has been removed +- Field `Parameters` of struct `WorkspacesClientBeginDiagnoseOptions` has been removed + +### Features Added + +- New value `ConnectionAuthTypeAPIKey`, `ConnectionAuthTypeAccessKey`, `ConnectionAuthTypeCustomKeys`, `ConnectionAuthTypeServicePrincipal` added to enum type `ConnectionAuthType` +- New value `ConnectionCategoryADLSGen2`, `ConnectionCategoryAPIKey`, `ConnectionCategoryAzureMySQLDb`, `ConnectionCategoryAzureOpenAI`, `ConnectionCategoryAzurePostgresDb`, `ConnectionCategoryAzureSQLDb`, `ConnectionCategoryAzureSynapseAnalytics`, `ConnectionCategoryCognitiveSearch`, `ConnectionCategoryCognitiveService`, `ConnectionCategoryCustomKeys`, `ConnectionCategoryRedis`, `ConnectionCategoryS3`, `ConnectionCategorySnowflake` added to enum type `ConnectionCategory` +- New value `ContainerTypeModelDataCollector` added to enum type `ContainerType` +- New value `CredentialsTypeKerberosKeytab`, `CredentialsTypeKerberosPassword` added to enum type `CredentialsType` +- New value `DatastoreTypeHdfs`, `DatastoreTypeOneLake` added to enum type `DatastoreType` +- New value `DistributionTypeRay` added to enum type `DistributionType` +- New value `JobStatusScheduled` added to enum type `JobStatus` +- New value `JobTypeLabeling`, `JobTypeSpark` added to enum type `JobType` +- New value `OutputDeliveryModeDirect` added to enum type `OutputDeliveryMode` +- New value `ScheduleActionTypeCreateMonitor`, `ScheduleActionTypeImportData` added to enum type `ScheduleActionType` +- New value `SecretsTypeKerberosKeytab`, `SecretsTypeKerberosPassword` added to enum type `SecretsType` +- New enum type `AssetProvisioningState` with values `AssetProvisioningStateCanceled`, `AssetProvisioningStateCreating`, `AssetProvisioningStateDeleting`, `AssetProvisioningStateFailed`, `AssetProvisioningStateSucceeded`, `AssetProvisioningStateUpdating` +- New enum type `AutoDeleteCondition` with values `AutoDeleteConditionCreatedGreaterThan`, `AutoDeleteConditionLastAccessedGreaterThan` +- New enum type `BaseEnvironmentSourceType` with values `BaseEnvironmentSourceTypeEnvironmentAsset` +- New enum type `BatchDeploymentConfigurationType` with values `BatchDeploymentConfigurationTypeModel`, `BatchDeploymentConfigurationTypePipelineComponent` +- New enum type `CategoricalDataDriftMetric` with values `CategoricalDataDriftMetricJensenShannonDistance`, `CategoricalDataDriftMetricPearsonsChiSquaredTest`, `CategoricalDataDriftMetricPopulationStabilityIndex` +- New enum type `CategoricalDataQualityMetric` with values 
`CategoricalDataQualityMetricDataTypeErrorRate`, `CategoricalDataQualityMetricNullValueRate`, `CategoricalDataQualityMetricOutOfBoundsRate` +- New enum type `CategoricalPredictionDriftMetric` with values `CategoricalPredictionDriftMetricJensenShannonDistance`, `CategoricalPredictionDriftMetricPearsonsChiSquaredTest`, `CategoricalPredictionDriftMetricPopulationStabilityIndex` +- New enum type `ClassificationModelPerformanceMetric` with values `ClassificationModelPerformanceMetricAccuracy`, `ClassificationModelPerformanceMetricPrecision`, `ClassificationModelPerformanceMetricRecall` +- New enum type `DataCollectionMode` with values `DataCollectionModeDisabled`, `DataCollectionModeEnabled` +- New enum type `DataImportSourceType` with values `DataImportSourceTypeDatabase`, `DataImportSourceTypeFileSystem` +- New enum type `EmailNotificationEnableType` with values `EmailNotificationEnableTypeJobCancelled`, `EmailNotificationEnableTypeJobCompleted`, `EmailNotificationEnableTypeJobFailed` +- New enum type `EndpointServiceConnectionStatus` with values `EndpointServiceConnectionStatusApproved`, `EndpointServiceConnectionStatusDisconnected`, `EndpointServiceConnectionStatusPending`, `EndpointServiceConnectionStatusRejected`, `EndpointServiceConnectionStatusTimeout` +- New enum type `EnvironmentVariableType` with values `EnvironmentVariableTypeLocal` +- New enum type `ExportFormatType` with values `ExportFormatTypeCSV`, `ExportFormatTypeCoco`, `ExportFormatTypeDataset` +- New enum type `FeatureAttributionMetric` with values `FeatureAttributionMetricNormalizedDiscountedCumulativeGain` +- New enum type `FeatureDataType` with values `FeatureDataTypeBinary`, `FeatureDataTypeBoolean`, `FeatureDataTypeDatetime`, `FeatureDataTypeDouble`, `FeatureDataTypeFloat`, `FeatureDataTypeInteger`, `FeatureDataTypeLong`, `FeatureDataTypeString` +- New enum type `FeaturestoreJobType` with values `FeaturestoreJobTypeBackfillMaterialization`, `FeaturestoreJobTypeRecurrentMaterialization` +- New enum type `GenerationSafetyQualityMetric` with values `GenerationSafetyQualityMetricAcceptableCoherenceScorePerInstance`, `GenerationSafetyQualityMetricAcceptableFluencyScorePerInstance`, `GenerationSafetyQualityMetricAcceptableGroundednessScorePerInstance`, `GenerationSafetyQualityMetricAcceptableRelevanceScorePerInstance`, `GenerationSafetyQualityMetricAcceptableSimilarityScorePerInstance`, `GenerationSafetyQualityMetricAggregatedCoherencePassRate`, `GenerationSafetyQualityMetricAggregatedFluencyPassRate`, `GenerationSafetyQualityMetricAggregatedGroundednessPassRate`, `GenerationSafetyQualityMetricAggregatedRelevancePassRate`, `GenerationSafetyQualityMetricAggregatedSimilarityPassRate` +- New enum type `GenerationTokenStatisticsMetric` with values `GenerationTokenStatisticsMetricTotalTokenCount`, `GenerationTokenStatisticsMetricTotalTokenCountPerGroup` +- New enum type `ImageAnnotationType` with values `ImageAnnotationTypeBoundingBox`, `ImageAnnotationTypeClassification`, `ImageAnnotationTypeInstanceSegmentation` +- New enum type `ImageType` with values `ImageTypeAzureml`, `ImageTypeDocker` +- New enum type `IncrementalDataRefresh` with values `IncrementalDataRefreshDisabled`, `IncrementalDataRefreshEnabled` +- New enum type `InferencingServerType` with values `InferencingServerTypeAzureMLBatch`, `InferencingServerTypeAzureMLOnline`, `InferencingServerTypeCustom`, `InferencingServerTypeTriton` +- New enum type `InputPathType` with values `InputPathTypePathID`, `InputPathTypePathVersion`, `InputPathTypeURL` +- New enum type 
`IsolationMode` with values `IsolationModeAllowInternetOutbound`, `IsolationModeAllowOnlyApprovedOutbound`, `IsolationModeDisabled` +- New enum type `JobProvisioningState` with values `JobProvisioningStateCanceled`, `JobProvisioningStateFailed`, `JobProvisioningStateInProgress`, `JobProvisioningStateSucceeded` +- New enum type `JobTier` with values `JobTierBasic`, `JobTierNull`, `JobTierPremium`, `JobTierSpot`, `JobTierStandard` +- New enum type `LogTrainingMetrics` with values `LogTrainingMetricsDisable`, `LogTrainingMetricsEnable` +- New enum type `LogValidationLoss` with values `LogValidationLossDisable`, `LogValidationLossEnable` +- New enum type `MLAssistConfigurationType` with values `MLAssistConfigurationTypeDisabled`, `MLAssistConfigurationTypeEnabled` +- New enum type `MLFlowAutologgerState` with values `MLFlowAutologgerStateDisabled`, `MLFlowAutologgerStateEnabled` +- New enum type `ManagedNetworkStatus` with values `ManagedNetworkStatusActive`, `ManagedNetworkStatusInactive` +- New enum type `MaterializationStoreType` with values `MaterializationStoreTypeNone`, `MaterializationStoreTypeOffline`, `MaterializationStoreTypeOnline`, `MaterializationStoreTypeOnlineAndOffline` +- New enum type `MediaType` with values `MediaTypeImage`, `MediaTypeText` +- New enum type `MlflowAutologger` with values `MlflowAutologgerDisabled`, `MlflowAutologgerEnabled` +- New enum type `ModelTaskType` with values `ModelTaskTypeClassification`, `ModelTaskTypeQuestionAnswering`, `ModelTaskTypeRegression` +- New enum type `MonitorComputeIdentityType` with values `MonitorComputeIdentityTypeAmlToken`, `MonitorComputeIdentityTypeManagedIdentity` +- New enum type `MonitorComputeType` with values `MonitorComputeTypeServerlessSpark` +- New enum type `MonitoringAlertNotificationType` with values `MonitoringAlertNotificationTypeAzureMonitor`, `MonitoringAlertNotificationTypeEmail` +- New enum type `MonitoringFeatureDataType` with values `MonitoringFeatureDataTypeCategorical`, `MonitoringFeatureDataTypeNumerical` +- New enum type `MonitoringFeatureFilterType` with values `MonitoringFeatureFilterTypeAllFeatures`, `MonitoringFeatureFilterTypeFeatureSubset`, `MonitoringFeatureFilterTypeTopNByAttribution` +- New enum type `MonitoringInputDataType` with values `MonitoringInputDataTypeFixed`, `MonitoringInputDataTypeStatic`, `MonitoringInputDataTypeTrailing` +- New enum type `MonitoringModelType` with values `MonitoringModelTypeClassification`, `MonitoringModelTypeRegression` +- New enum type `MonitoringNotificationMode` with values `MonitoringNotificationModeDisabled`, `MonitoringNotificationModeEnabled` +- New enum type `MonitoringSignalType` with values `MonitoringSignalTypeCustom`, `MonitoringSignalTypeDataDrift`, `MonitoringSignalTypeDataQuality`, `MonitoringSignalTypeFeatureAttributionDrift`, `MonitoringSignalTypeGenerationSafetyQuality`, `MonitoringSignalTypeGenerationTokenStatistics`, `MonitoringSignalTypeModelPerformance`, `MonitoringSignalTypePredictionDrift` +- New enum type `MultiSelect` with values `MultiSelectDisabled`, `MultiSelectEnabled` +- New enum type `NlpLearningRateScheduler` with values `NlpLearningRateSchedulerConstant`, `NlpLearningRateSchedulerConstantWithWarmup`, `NlpLearningRateSchedulerCosine`, `NlpLearningRateSchedulerCosineWithRestarts`, `NlpLearningRateSchedulerLinear`, `NlpLearningRateSchedulerNone`, `NlpLearningRateSchedulerPolynomial` +- New enum type `NodesValueType` with values `NodesValueTypeAll`, `NodesValueTypeCustom` +- New enum type `NumericalDataDriftMetric` with values 
`NumericalDataDriftMetricJensenShannonDistance`, `NumericalDataDriftMetricNormalizedWassersteinDistance`, `NumericalDataDriftMetricPopulationStabilityIndex`, `NumericalDataDriftMetricTwoSampleKolmogorovSmirnovTest` +- New enum type `NumericalDataQualityMetric` with values `NumericalDataQualityMetricDataTypeErrorRate`, `NumericalDataQualityMetricNullValueRate`, `NumericalDataQualityMetricOutOfBoundsRate` +- New enum type `NumericalPredictionDriftMetric` with values `NumericalPredictionDriftMetricJensenShannonDistance`, `NumericalPredictionDriftMetricNormalizedWassersteinDistance`, `NumericalPredictionDriftMetricPopulationStabilityIndex`, `NumericalPredictionDriftMetricTwoSampleKolmogorovSmirnovTest` +- New enum type `OneLakeArtifactType` with values `OneLakeArtifactTypeLakeHouse` +- New enum type `PackageBuildState` with values `PackageBuildStateFailed`, `PackageBuildStateNotStarted`, `PackageBuildStateRunning`, `PackageBuildStateSucceeded` +- New enum type `PackageInputDeliveryMode` with values `PackageInputDeliveryModeCopy`, `PackageInputDeliveryModeDownload` +- New enum type `PackageInputType` with values `PackageInputTypeURIFile`, `PackageInputTypeURIFolder` +- New enum type `PendingUploadCredentialType` with values `PendingUploadCredentialTypeSAS` +- New enum type `PendingUploadType` with values `PendingUploadTypeNone`, `PendingUploadTypeTemporaryBlobReference` +- New enum type `ProtectionLevel` with values `ProtectionLevelAll`, `ProtectionLevelNone` +- New enum type `Protocol` with values `ProtocolHTTP`, `ProtocolTCP`, `ProtocolUDP` +- New enum type `RegressionModelPerformanceMetric` with values `RegressionModelPerformanceMetricMeanAbsoluteError`, `RegressionModelPerformanceMetricMeanSquaredError`, `RegressionModelPerformanceMetricRootMeanSquaredError` +- New enum type `RollingRateType` with values `RollingRateTypeDay`, `RollingRateTypeHour`, `RollingRateTypeMinute`, `RollingRateTypeMonth`, `RollingRateTypeYear` +- New enum type `RuleAction` with values `RuleActionAllow`, `RuleActionDeny` +- New enum type `RuleCategory` with values `RuleCategoryRecommended`, `RuleCategoryRequired`, `RuleCategoryUserDefined` +- New enum type `RuleStatus` with values `RuleStatusActive`, `RuleStatusInactive` +- New enum type `RuleType` with values `RuleTypeFQDN`, `RuleTypePrivateEndpoint`, `RuleTypeServiceTag` +- New enum type `SparkJobEntryType` with values `SparkJobEntryTypeSparkJobPythonEntry`, `SparkJobEntryTypeSparkJobScalaEntry` +- New enum type `StatusMessageLevel` with values `StatusMessageLevelError`, `StatusMessageLevelInformation`, `StatusMessageLevelWarning` +- New enum type `TextAnnotationType` with values `TextAnnotationTypeClassification`, `TextAnnotationTypeNamedEntityRecognition` +- New enum type `TrainingMode` with values `TrainingModeAuto`, `TrainingModeDistributed`, `TrainingModeNonDistributed` +- New enum type `VolumeDefinitionType` with values `VolumeDefinitionTypeBind`, `VolumeDefinitionTypeNpipe`, `VolumeDefinitionTypeTmpfs`, `VolumeDefinitionTypeVolume` +- New enum type `WebhookType` with values `WebhookTypeAzureDevOps` +- New function `*APIKeyAuthWorkspaceConnectionProperties.GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2` +- New function `*AccessKeyAuthTypeWorkspaceConnectionProperties.GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2` +- New function `*AllFeatures.GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase` +- New function `*AllNodes.GetNodes() *Nodes` +- New function 
`*AmlTokenComputeIdentity.GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase` +- New function `*AzMonMonitoringAlertNotificationSettings.GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase` +- New function `*AzureDevOpsWebhook.GetWebhook() *Webhook` +- New function `*AzureMLBatchInferencingServer.GetInferencingServer() *InferencingServer` +- New function `*AzureMLOnlineInferencingServer.GetInferencingServer() *InferencingServer` +- New function `*BaseEnvironmentID.GetBaseEnvironmentSource() *BaseEnvironmentSource` +- New function `*BaseEnvironmentSource.GetBaseEnvironmentSource() *BaseEnvironmentSource` +- New function `*BatchDeploymentConfiguration.GetBatchDeploymentConfiguration() *BatchDeploymentConfiguration` +- New function `*BatchPipelineComponentDeploymentConfiguration.GetBatchDeploymentConfiguration() *BatchDeploymentConfiguration` +- New function `*CSVExportSummary.GetExportSummary() *ExportSummary` +- New function `*CategoricalDataDriftMetricThreshold.GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase` +- New function `*CategoricalDataQualityMetricThreshold.GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase` +- New function `*CategoricalPredictionDriftMetricThreshold.GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase` +- New function `*ClassificationModelPerformanceMetricThreshold.GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase` +- New function `*ClientFactory.NewFeaturesClient() *FeaturesClient` +- New function `*ClientFactory.NewFeaturesetContainersClient() *FeaturesetContainersClient` +- New function `*ClientFactory.NewFeaturesetVersionsClient() *FeaturesetVersionsClient` +- New function `*ClientFactory.NewFeaturestoreEntityContainersClient() *FeaturestoreEntityContainersClient` +- New function `*ClientFactory.NewFeaturestoreEntityVersionsClient() *FeaturestoreEntityVersionsClient` +- New function `*ClientFactory.NewLabelingJobsClient() *LabelingJobsClient` +- New function `*ClientFactory.NewManagedNetworkProvisionsClient() *ManagedNetworkProvisionsClient` +- New function `*ClientFactory.NewManagedNetworkSettingsRuleClient() *ManagedNetworkSettingsRuleClient` +- New function `*ClientFactory.NewRegistriesClient() *RegistriesClient` +- New function `*ClientFactory.NewRegistryCodeContainersClient() *RegistryCodeContainersClient` +- New function `*ClientFactory.NewRegistryCodeVersionsClient() *RegistryCodeVersionsClient` +- New function `*ClientFactory.NewRegistryComponentContainersClient() *RegistryComponentContainersClient` +- New function `*ClientFactory.NewRegistryComponentVersionsClient() *RegistryComponentVersionsClient` +- New function `*ClientFactory.NewRegistryDataContainersClient() *RegistryDataContainersClient` +- New function `*ClientFactory.NewRegistryDataVersionsClient() *RegistryDataVersionsClient` +- New function `*ClientFactory.NewRegistryEnvironmentContainersClient() *RegistryEnvironmentContainersClient` +- New function `*ClientFactory.NewRegistryEnvironmentVersionsClient() *RegistryEnvironmentVersionsClient` +- New function `*ClientFactory.NewRegistryModelContainersClient() *RegistryModelContainersClient` +- New function `*ClientFactory.NewRegistryModelVersionsClient() *RegistryModelVersionsClient` +- New function `*CocoExportSummary.GetExportSummary() *ExportSummary` +- New function `*CodeVersionsClient.CreateOrGetStartPendingUpload(context.Context, string, string, string, string, PendingUploadRequestDto, 
*CodeVersionsClientCreateOrGetStartPendingUploadOptions) (CodeVersionsClientCreateOrGetStartPendingUploadResponse, error)` +- New function `*ComputeClient.UpdateCustomServices(context.Context, string, string, string, []*CustomService, *ComputeClientUpdateCustomServicesOptions) (ComputeClientUpdateCustomServicesResponse, error)` +- New function `*ComputeClient.UpdateIdleShutdownSetting(context.Context, string, string, string, IdleShutdownSetting, *ComputeClientUpdateIdleShutdownSettingOptions) (ComputeClientUpdateIdleShutdownSettingResponse, error)` +- New function `*CreateMonitorAction.GetScheduleActionBase() *ScheduleActionBase` +- New function `*CustomInferencingServer.GetInferencingServer() *InferencingServer` +- New function `*CustomKeysWorkspaceConnectionProperties.GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2` +- New function `*CustomMonitoringSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*DataDriftMetricThresholdBase.GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase` +- New function `*DataDriftMonitoringSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*DataImport.GetDataVersionBaseProperties() *DataVersionBaseProperties` +- New function `*DataImportSource.GetDataImportSource() *DataImportSource` +- New function `*DataQualityMetricThresholdBase.GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase` +- New function `*DataQualityMonitoringSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*DatabaseSource.GetDataImportSource() *DataImportSource` +- New function `*DatasetExportSummary.GetExportSummary() *ExportSummary` +- New function `*EmailMonitoringAlertNotificationSettings.GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase` +- New function `*ExportSummary.GetExportSummary() *ExportSummary` +- New function `*FeatureAttributionDriftMonitoringSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*FeatureSubset.GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase` +- New function `NewFeaturesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*FeaturesClient, error)` +- New function `*FeaturesClient.Get(context.Context, string, string, string, string, string, *FeaturesClientGetOptions) (FeaturesClientGetResponse, error)` +- New function `*FeaturesClient.NewListPager(string, string, string, string, *FeaturesClientListOptions) *runtime.Pager[FeaturesClientListResponse]` +- New function `NewFeaturesetContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*FeaturesetContainersClient, error)` +- New function `*FeaturesetContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, FeaturesetContainer, *FeaturesetContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturesetContainersClientCreateOrUpdateResponse], error)` +- New function `*FeaturesetContainersClient.BeginDelete(context.Context, string, string, string, *FeaturesetContainersClientBeginDeleteOptions) (*runtime.Poller[FeaturesetContainersClientDeleteResponse], error)` +- New function `*FeaturesetContainersClient.GetEntity(context.Context, string, string, string, *FeaturesetContainersClientGetEntityOptions) (FeaturesetContainersClientGetEntityResponse, error)` +- New function `*FeaturesetContainersClient.NewListPager(string, string, *FeaturesetContainersClientListOptions) *runtime.Pager[FeaturesetContainersClientListResponse]` +- New function `NewFeaturesetVersionsClient(string, 
azcore.TokenCredential, *arm.ClientOptions) (*FeaturesetVersionsClient, error)` +- New function `*FeaturesetVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, FeaturesetVersion, *FeaturesetVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturesetVersionsClientCreateOrUpdateResponse], error)` +- New function `*FeaturesetVersionsClient.BeginDelete(context.Context, string, string, string, string, *FeaturesetVersionsClientBeginDeleteOptions) (*runtime.Poller[FeaturesetVersionsClientDeleteResponse], error)` +- New function `*FeaturesetVersionsClient.Get(context.Context, string, string, string, string, *FeaturesetVersionsClientGetOptions) (FeaturesetVersionsClientGetResponse, error)` +- New function `*FeaturesetVersionsClient.NewListMaterializationJobsPager(string, string, string, string, *FeaturesetVersionsClientListMaterializationJobsOptions) *runtime.Pager[FeaturesetVersionsClientListMaterializationJobsResponse]` +- New function `*FeaturesetVersionsClient.NewListPager(string, string, string, *FeaturesetVersionsClientListOptions) *runtime.Pager[FeaturesetVersionsClientListResponse]` +- New function `*FeaturesetVersionsClient.BeginBackfill(context.Context, string, string, string, string, FeaturesetVersionBackfillRequest, *FeaturesetVersionsClientBeginBackfillOptions) (*runtime.Poller[FeaturesetVersionsClientBackfillResponse], error)` +- New function `NewFeaturestoreEntityContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*FeaturestoreEntityContainersClient, error)` +- New function `*FeaturestoreEntityContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, FeaturestoreEntityContainer, *FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturestoreEntityContainersClientCreateOrUpdateResponse], error)` +- New function `*FeaturestoreEntityContainersClient.BeginDelete(context.Context, string, string, string, *FeaturestoreEntityContainersClientBeginDeleteOptions) (*runtime.Poller[FeaturestoreEntityContainersClientDeleteResponse], error)` +- New function `*FeaturestoreEntityContainersClient.GetEntity(context.Context, string, string, string, *FeaturestoreEntityContainersClientGetEntityOptions) (FeaturestoreEntityContainersClientGetEntityResponse, error)` +- New function `*FeaturestoreEntityContainersClient.NewListPager(string, string, *FeaturestoreEntityContainersClientListOptions) *runtime.Pager[FeaturestoreEntityContainersClientListResponse]` +- New function `NewFeaturestoreEntityVersionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*FeaturestoreEntityVersionsClient, error)` +- New function `*FeaturestoreEntityVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, FeaturestoreEntityVersion, *FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturestoreEntityVersionsClientCreateOrUpdateResponse], error)` +- New function `*FeaturestoreEntityVersionsClient.BeginDelete(context.Context, string, string, string, string, *FeaturestoreEntityVersionsClientBeginDeleteOptions) (*runtime.Poller[FeaturestoreEntityVersionsClientDeleteResponse], error)` +- New function `*FeaturestoreEntityVersionsClient.Get(context.Context, string, string, string, string, *FeaturestoreEntityVersionsClientGetOptions) (FeaturestoreEntityVersionsClientGetResponse, error)` +- New function `*FeaturestoreEntityVersionsClient.NewListPager(string, string, string, *FeaturestoreEntityVersionsClientListOptions) 
*runtime.Pager[FeaturestoreEntityVersionsClientListResponse]` +- New function `*FileSystemSource.GetDataImportSource() *DataImportSource` +- New function `*FixedInputData.GetMonitoringInputDataBase() *MonitoringInputDataBase` +- New function `*FqdnOutboundRule.GetOutboundRule() *OutboundRule` +- New function `*GenerationSafetyQualityMonitoringSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*GenerationTokenStatisticsSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*HdfsDatastore.GetDatastoreProperties() *DatastoreProperties` +- New function `*ImportDataAction.GetScheduleActionBase() *ScheduleActionBase` +- New function `*InferencingServer.GetInferencingServer() *InferencingServer` +- New function `*JobsClient.Update(context.Context, string, string, string, PartialJobBasePartialResource, *JobsClientUpdateOptions) (JobsClientUpdateResponse, error)` +- New function `*KerberosKeytabCredentials.GetDatastoreCredentials() *DatastoreCredentials` +- New function `*KerberosKeytabSecrets.GetDatastoreSecrets() *DatastoreSecrets` +- New function `*KerberosPasswordCredentials.GetDatastoreCredentials() *DatastoreCredentials` +- New function `*KerberosPasswordSecrets.GetDatastoreSecrets() *DatastoreSecrets` +- New function `*LabelingJobImageProperties.GetLabelingJobMediaProperties() *LabelingJobMediaProperties` +- New function `*LabelingJobMediaProperties.GetLabelingJobMediaProperties() *LabelingJobMediaProperties` +- New function `*LabelingJobProperties.GetJobBaseProperties() *JobBaseProperties` +- New function `*LabelingJobTextProperties.GetLabelingJobMediaProperties() *LabelingJobMediaProperties` +- New function `NewLabelingJobsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*LabelingJobsClient, error)` +- New function `*LabelingJobsClient.BeginCreateOrUpdate(context.Context, string, string, string, LabelingJob, *LabelingJobsClientBeginCreateOrUpdateOptions) (*runtime.Poller[LabelingJobsClientCreateOrUpdateResponse], error)` +- New function `*LabelingJobsClient.Delete(context.Context, string, string, string, *LabelingJobsClientDeleteOptions) (LabelingJobsClientDeleteResponse, error)` +- New function `*LabelingJobsClient.BeginExportLabels(context.Context, string, string, string, ExportSummaryClassification, *LabelingJobsClientBeginExportLabelsOptions) (*runtime.Poller[LabelingJobsClientExportLabelsResponse], error)` +- New function `*LabelingJobsClient.Get(context.Context, string, string, string, *LabelingJobsClientGetOptions) (LabelingJobsClientGetResponse, error)` +- New function `*LabelingJobsClient.NewListPager(string, string, *LabelingJobsClientListOptions) *runtime.Pager[LabelingJobsClientListResponse]` +- New function `*LabelingJobsClient.Pause(context.Context, string, string, string, *LabelingJobsClientPauseOptions) (LabelingJobsClientPauseResponse, error)` +- New function `*LabelingJobsClient.BeginResume(context.Context, string, string, string, *LabelingJobsClientBeginResumeOptions) (*runtime.Poller[LabelingJobsClientResumeResponse], error)` +- New function `*LakeHouseArtifact.GetOneLakeArtifact() *OneLakeArtifact` +- New function `*MLAssistConfiguration.GetMLAssistConfiguration() *MLAssistConfiguration` +- New function `*MLAssistConfigurationDisabled.GetMLAssistConfiguration() *MLAssistConfiguration` +- New function `*MLAssistConfigurationEnabled.GetMLAssistConfiguration() *MLAssistConfiguration` +- New function `*ManagedComputeIdentity.GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase` +- New function 
`NewManagedNetworkProvisionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ManagedNetworkProvisionsClient, error)` +- New function `*ManagedNetworkProvisionsClient.BeginProvisionManagedNetwork(context.Context, string, string, *ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions) (*runtime.Poller[ManagedNetworkProvisionsClientProvisionManagedNetworkResponse], error)` +- New function `NewManagedNetworkSettingsRuleClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ManagedNetworkSettingsRuleClient, error)` +- New function `*ManagedNetworkSettingsRuleClient.BeginCreateOrUpdate(context.Context, string, string, string, OutboundRuleBasicResource, *ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions) (*runtime.Poller[ManagedNetworkSettingsRuleClientCreateOrUpdateResponse], error)` +- New function `*ManagedNetworkSettingsRuleClient.BeginDelete(context.Context, string, string, string, *ManagedNetworkSettingsRuleClientBeginDeleteOptions) (*runtime.Poller[ManagedNetworkSettingsRuleClientDeleteResponse], error)` +- New function `*ManagedNetworkSettingsRuleClient.Get(context.Context, string, string, string, *ManagedNetworkSettingsRuleClientGetOptions) (ManagedNetworkSettingsRuleClientGetResponse, error)` +- New function `*ManagedNetworkSettingsRuleClient.NewListPager(string, string, *ManagedNetworkSettingsRuleClientListOptions) *runtime.Pager[ManagedNetworkSettingsRuleClientListResponse]` +- New function `*ModelPerformanceMetricThresholdBase.GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase` +- New function `*ModelPerformanceSignal.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*ModelVersionsClient.BeginPackage(context.Context, string, string, string, string, PackageRequest, *ModelVersionsClientBeginPackageOptions) (*runtime.Poller[ModelVersionsClientPackageResponse], error)` +- New function `*MonitorComputeConfigurationBase.GetMonitorComputeConfigurationBase() *MonitorComputeConfigurationBase` +- New function `*MonitorComputeIdentityBase.GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase` +- New function `*MonitorServerlessSparkCompute.GetMonitorComputeConfigurationBase() *MonitorComputeConfigurationBase` +- New function `*MonitoringAlertNotificationSettingsBase.GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase` +- New function `*MonitoringFeatureFilterBase.GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase` +- New function `*MonitoringInputDataBase.GetMonitoringInputDataBase() *MonitoringInputDataBase` +- New function `*MonitoringSignalBase.GetMonitoringSignalBase() *MonitoringSignalBase` +- New function `*OneLakeArtifact.GetOneLakeArtifact() *OneLakeArtifact` +- New function `*OneLakeDatastore.GetDatastoreProperties() *DatastoreProperties` +- New function `*OutboundRule.GetOutboundRule() *OutboundRule` +- New function `*PackageInputPathBase.GetPackageInputPathBase() *PackageInputPathBase` +- New function `*PackageInputPathID.GetPackageInputPathBase() *PackageInputPathBase` +- New function `*PackageInputPathURL.GetPackageInputPathBase() *PackageInputPathBase` +- New function `*PackageInputPathVersion.GetPackageInputPathBase() *PackageInputPathBase` +- New function `*PendingUploadCredentialDto.GetPendingUploadCredentialDto() *PendingUploadCredentialDto` +- New function `*PredictionDriftMetricThresholdBase.GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase` +- New function `*PredictionDriftMonitoringSignal.GetMonitoringSignalBase() 
*MonitoringSignalBase` +- New function `*PrivateEndpointOutboundRule.GetOutboundRule() *OutboundRule` +- New function `*Ray.GetDistributionConfiguration() *DistributionConfiguration` +- New function `NewRegistriesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistriesClient, error)` +- New function `*RegistriesClient.BeginCreateOrUpdate(context.Context, string, string, Registry, *RegistriesClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistriesClientCreateOrUpdateResponse], error)` +- New function `*RegistriesClient.BeginDelete(context.Context, string, string, *RegistriesClientBeginDeleteOptions) (*runtime.Poller[RegistriesClientDeleteResponse], error)` +- New function `*RegistriesClient.Get(context.Context, string, string, *RegistriesClientGetOptions) (RegistriesClientGetResponse, error)` +- New function `*RegistriesClient.NewListBySubscriptionPager(*RegistriesClientListBySubscriptionOptions) *runtime.Pager[RegistriesClientListBySubscriptionResponse]` +- New function `*RegistriesClient.NewListPager(string, *RegistriesClientListOptions) *runtime.Pager[RegistriesClientListResponse]` +- New function `*RegistriesClient.BeginRemoveRegions(context.Context, string, string, Registry, *RegistriesClientBeginRemoveRegionsOptions) (*runtime.Poller[RegistriesClientRemoveRegionsResponse], error)` +- New function `*RegistriesClient.Update(context.Context, string, string, PartialRegistryPartialTrackedResource, *RegistriesClientUpdateOptions) (RegistriesClientUpdateResponse, error)` +- New function `NewRegistryCodeContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryCodeContainersClient, error)` +- New function `*RegistryCodeContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, CodeContainer, *RegistryCodeContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryCodeContainersClientCreateOrUpdateResponse], error)` +- New function `*RegistryCodeContainersClient.BeginDelete(context.Context, string, string, string, *RegistryCodeContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryCodeContainersClientDeleteResponse], error)` +- New function `*RegistryCodeContainersClient.Get(context.Context, string, string, string, *RegistryCodeContainersClientGetOptions) (RegistryCodeContainersClientGetResponse, error)` +- New function `*RegistryCodeContainersClient.NewListPager(string, string, *RegistryCodeContainersClientListOptions) *runtime.Pager[RegistryCodeContainersClientListResponse]` +- New function `NewRegistryCodeVersionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryCodeVersionsClient, error)` +- New function `*RegistryCodeVersionsClient.CreateOrGetStartPendingUpload(context.Context, string, string, string, string, PendingUploadRequestDto, *RegistryCodeVersionsClientCreateOrGetStartPendingUploadOptions) (RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse, error)` +- New function `*RegistryCodeVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, CodeVersion, *RegistryCodeVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryCodeVersionsClientCreateOrUpdateResponse], error)` +- New function `*RegistryCodeVersionsClient.BeginDelete(context.Context, string, string, string, string, *RegistryCodeVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryCodeVersionsClientDeleteResponse], error)` +- New function `*RegistryCodeVersionsClient.Get(context.Context, string, string, string, string, *RegistryCodeVersionsClientGetOptions) 
(RegistryCodeVersionsClientGetResponse, error)` +- New function `*RegistryCodeVersionsClient.NewListPager(string, string, string, *RegistryCodeVersionsClientListOptions) *runtime.Pager[RegistryCodeVersionsClientListResponse]` +- New function `NewRegistryComponentContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryComponentContainersClient, error)` +- New function `*RegistryComponentContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, ComponentContainer, *RegistryComponentContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryComponentContainersClientCreateOrUpdateResponse], error)` +- New function `*RegistryComponentContainersClient.BeginDelete(context.Context, string, string, string, *RegistryComponentContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryComponentContainersClientDeleteResponse], error)` +- New function `*RegistryComponentContainersClient.Get(context.Context, string, string, string, *RegistryComponentContainersClientGetOptions) (RegistryComponentContainersClientGetResponse, error)` +- New function `*RegistryComponentContainersClient.NewListPager(string, string, *RegistryComponentContainersClientListOptions) *runtime.Pager[RegistryComponentContainersClientListResponse]` +- New function `NewRegistryComponentVersionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryComponentVersionsClient, error)` +- New function `*RegistryComponentVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, ComponentVersion, *RegistryComponentVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryComponentVersionsClientCreateOrUpdateResponse], error)` +- New function `*RegistryComponentVersionsClient.BeginDelete(context.Context, string, string, string, string, *RegistryComponentVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryComponentVersionsClientDeleteResponse], error)` +- New function `*RegistryComponentVersionsClient.Get(context.Context, string, string, string, string, *RegistryComponentVersionsClientGetOptions) (RegistryComponentVersionsClientGetResponse, error)` +- New function `*RegistryComponentVersionsClient.NewListPager(string, string, string, *RegistryComponentVersionsClientListOptions) *runtime.Pager[RegistryComponentVersionsClientListResponse]` +- New function `NewRegistryDataContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryDataContainersClient, error)` +- New function `*RegistryDataContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, DataContainer, *RegistryDataContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryDataContainersClientCreateOrUpdateResponse], error)` +- New function `*RegistryDataContainersClient.BeginDelete(context.Context, string, string, string, *RegistryDataContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryDataContainersClientDeleteResponse], error)` +- New function `*RegistryDataContainersClient.Get(context.Context, string, string, string, *RegistryDataContainersClientGetOptions) (RegistryDataContainersClientGetResponse, error)` +- New function `*RegistryDataContainersClient.NewListPager(string, string, *RegistryDataContainersClientListOptions) *runtime.Pager[RegistryDataContainersClientListResponse]` +- New function `NewRegistryDataVersionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryDataVersionsClient, error)` +- New function `*RegistryDataVersionsClient.CreateOrGetStartPendingUpload(context.Context, string, string, 
string, string, PendingUploadRequestDto, *RegistryDataVersionsClientCreateOrGetStartPendingUploadOptions) (RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse, error)` +- New function `*RegistryDataVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, DataVersionBase, *RegistryDataVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryDataVersionsClientCreateOrUpdateResponse], error)` +- New function `*RegistryDataVersionsClient.BeginDelete(context.Context, string, string, string, string, *RegistryDataVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryDataVersionsClientDeleteResponse], error)` +- New function `*RegistryDataVersionsClient.Get(context.Context, string, string, string, string, *RegistryDataVersionsClientGetOptions) (RegistryDataVersionsClientGetResponse, error)` +- New function `*RegistryDataVersionsClient.NewListPager(string, string, string, *RegistryDataVersionsClientListOptions) *runtime.Pager[RegistryDataVersionsClientListResponse]` +- New function `NewRegistryEnvironmentContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryEnvironmentContainersClient, error)` +- New function `*RegistryEnvironmentContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, EnvironmentContainer, *RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryEnvironmentContainersClientCreateOrUpdateResponse], error)` +- New function `*RegistryEnvironmentContainersClient.BeginDelete(context.Context, string, string, string, *RegistryEnvironmentContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryEnvironmentContainersClientDeleteResponse], error)` +- New function `*RegistryEnvironmentContainersClient.Get(context.Context, string, string, string, *RegistryEnvironmentContainersClientGetOptions) (RegistryEnvironmentContainersClientGetResponse, error)` +- New function `*RegistryEnvironmentContainersClient.NewListPager(string, string, *RegistryEnvironmentContainersClientListOptions) *runtime.Pager[RegistryEnvironmentContainersClientListResponse]` +- New function `NewRegistryEnvironmentVersionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryEnvironmentVersionsClient, error)` +- New function `*RegistryEnvironmentVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, EnvironmentVersion, *RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryEnvironmentVersionsClientCreateOrUpdateResponse], error)` +- New function `*RegistryEnvironmentVersionsClient.BeginDelete(context.Context, string, string, string, string, *RegistryEnvironmentVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryEnvironmentVersionsClientDeleteResponse], error)` +- New function `*RegistryEnvironmentVersionsClient.Get(context.Context, string, string, string, string, *RegistryEnvironmentVersionsClientGetOptions) (RegistryEnvironmentVersionsClientGetResponse, error)` +- New function `*RegistryEnvironmentVersionsClient.NewListPager(string, string, string, *RegistryEnvironmentVersionsClientListOptions) *runtime.Pager[RegistryEnvironmentVersionsClientListResponse]` +- New function `NewRegistryModelContainersClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryModelContainersClient, error)` +- New function `*RegistryModelContainersClient.BeginCreateOrUpdate(context.Context, string, string, string, ModelContainer, *RegistryModelContainersClientBeginCreateOrUpdateOptions) 
(*runtime.Poller[RegistryModelContainersClientCreateOrUpdateResponse], error)` +- New function `*RegistryModelContainersClient.BeginDelete(context.Context, string, string, string, *RegistryModelContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryModelContainersClientDeleteResponse], error)` +- New function `*RegistryModelContainersClient.Get(context.Context, string, string, string, *RegistryModelContainersClientGetOptions) (RegistryModelContainersClientGetResponse, error)` +- New function `*RegistryModelContainersClient.NewListPager(string, string, *RegistryModelContainersClientListOptions) *runtime.Pager[RegistryModelContainersClientListResponse]` +- New function `NewRegistryModelVersionsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*RegistryModelVersionsClient, error)` +- New function `*RegistryModelVersionsClient.CreateOrGetStartPendingUpload(context.Context, string, string, string, string, PendingUploadRequestDto, *RegistryModelVersionsClientCreateOrGetStartPendingUploadOptions) (RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse, error)` +- New function `*RegistryModelVersionsClient.BeginCreateOrUpdate(context.Context, string, string, string, string, ModelVersion, *RegistryModelVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryModelVersionsClientCreateOrUpdateResponse], error)` +- New function `*RegistryModelVersionsClient.BeginDelete(context.Context, string, string, string, string, *RegistryModelVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryModelVersionsClientDeleteResponse], error)` +- New function `*RegistryModelVersionsClient.Get(context.Context, string, string, string, string, *RegistryModelVersionsClientGetOptions) (RegistryModelVersionsClientGetResponse, error)` +- New function `*RegistryModelVersionsClient.NewListPager(string, string, string, *RegistryModelVersionsClientListOptions) *runtime.Pager[RegistryModelVersionsClientListResponse]` +- New function `*RegistryModelVersionsClient.BeginPackage(context.Context, string, string, string, string, PackageRequest, *RegistryModelVersionsClientBeginPackageOptions) (*runtime.Poller[RegistryModelVersionsClientPackageResponse], error)` +- New function `*RegressionModelPerformanceMetricThreshold.GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase` +- New function `*SASCredentialDto.GetPendingUploadCredentialDto() *PendingUploadCredentialDto` +- New function `*ServicePrincipalAuthTypeWorkspaceConnectionProperties.GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2` +- New function `*ServiceTagOutboundRule.GetOutboundRule() *OutboundRule` +- New function `*SparkJob.GetJobBaseProperties() *JobBaseProperties` +- New function `*SparkJobEntry.GetSparkJobEntry() *SparkJobEntry` +- New function `*SparkJobPythonEntry.GetSparkJobEntry() *SparkJobEntry` +- New function `*SparkJobScalaEntry.GetSparkJobEntry() *SparkJobEntry` +- New function `*StaticInputData.GetMonitoringInputDataBase() *MonitoringInputDataBase` +- New function `*TopNFeaturesByAttribution.GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase` +- New function `*TrailingInputData.GetMonitoringInputDataBase() *MonitoringInputDataBase` +- New function `*TritonInferencingServer.GetInferencingServer() *InferencingServer` +- New function `*Webhook.GetWebhook() *Webhook` +- New function `*WorkspaceConnectionsClient.ListSecrets(context.Context, string, string, string, *WorkspaceConnectionsClientListSecretsOptions) (WorkspaceConnectionsClientListSecretsResponse, error)` +- 
New function `*WorkspaceConnectionsClient.Update(context.Context, string, string, string, *WorkspaceConnectionsClientUpdateOptions) (WorkspaceConnectionsClientUpdateResponse, error)` +- New function `*Nodes.GetNodes() *Nodes` +- New function `*NumericalDataDriftMetricThreshold.GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase` +- New function `*NumericalDataQualityMetricThreshold.GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase` +- New function `*NumericalPredictionDriftMetricThreshold.GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase` +- New struct `APIKeyAuthWorkspaceConnectionProperties` +- New struct `AccessKeyAuthTypeWorkspaceConnectionProperties` +- New struct `AcrDetails` +- New struct `AllFeatures` +- New struct `AllNodes` +- New struct `AmlTokenComputeIdentity` +- New struct `ArmResourceID` +- New struct `AutoDeleteSetting` +- New struct `AutologgerSettings` +- New struct `AzMonMonitoringAlertNotificationSettings` +- New struct `AzureDatastore` +- New struct `AzureDevOpsWebhook` +- New struct `AzureMLBatchInferencingServer` +- New struct `AzureMLOnlineInferencingServer` +- New struct `BaseEnvironmentID` +- New struct `BatchPipelineComponentDeploymentConfiguration` +- New struct `BindOptions` +- New struct `BlobReferenceForConsumptionDto` +- New struct `CSVExportSummary` +- New struct `CategoricalDataDriftMetricThreshold` +- New struct `CategoricalDataQualityMetricThreshold` +- New struct `CategoricalPredictionDriftMetricThreshold` +- New struct `ClassificationModelPerformanceMetricThreshold` +- New struct `CocoExportSummary` +- New struct `Collection` +- New struct `ComputeInstanceAutologgerSettings` +- New struct `ComputeRuntimeDto` +- New struct `CreateMonitorAction` +- New struct `Cron` +- New struct `CustomInferencingServer` +- New struct `CustomKeys` +- New struct `CustomKeysWorkspaceConnectionProperties` +- New struct `CustomMetricThreshold` +- New struct `CustomMonitoringSignal` +- New struct `CustomService` +- New struct `DataCollector` +- New struct `DataDriftMonitoringSignal` +- New struct `DataImport` +- New struct `DataQualityMonitoringSignal` +- New struct `DatabaseSource` +- New struct `DatasetExportSummary` +- New struct `Docker` +- New struct `EmailMonitoringAlertNotificationSettings` +- New struct `EncryptionKeyVaultUpdateProperties` +- New struct `EncryptionUpdateProperties` +- New struct `Endpoint` +- New struct `EnvironmentVariable` +- New struct `FQDNEndpointsPropertyBag` +- New struct `Feature` +- New struct `FeatureAttributionDriftMonitoringSignal` +- New struct `FeatureAttributionMetricThreshold` +- New struct `FeatureProperties` +- New struct `FeatureResourceArmPaginatedResult` +- New struct `FeatureStoreSettings` +- New struct `FeatureSubset` +- New struct `FeatureWindow` +- New struct `FeaturesetContainer` +- New struct `FeaturesetContainerProperties` +- New struct `FeaturesetContainerResourceArmPaginatedResult` +- New struct `FeaturesetJob` +- New struct `FeaturesetJobArmPaginatedResult` +- New struct `FeaturesetSpecification` +- New struct `FeaturesetVersion` +- New struct `FeaturesetVersionBackfillRequest` +- New struct `FeaturesetVersionProperties` +- New struct `FeaturesetVersionResourceArmPaginatedResult` +- New struct `FeaturestoreEntityContainer` +- New struct `FeaturestoreEntityContainerProperties` +- New struct `FeaturestoreEntityContainerResourceArmPaginatedResult` +- New struct `FeaturestoreEntityVersion` +- New struct `FeaturestoreEntityVersionProperties` +- New struct 
`FeaturestoreEntityVersionResourceArmPaginatedResult` +- New struct `FileSystemSource` +- New struct `FixedInputData` +- New struct `FqdnOutboundRule` +- New struct `GenerationSafetyQualityMetricThreshold` +- New struct `GenerationSafetyQualityMonitoringSignal` +- New struct `GenerationTokenStatisticsMetricThreshold` +- New struct `GenerationTokenStatisticsSignal` +- New struct `HdfsDatastore` +- New struct `IdleShutdownSetting` +- New struct `Image` +- New struct `ImageMetadata` +- New struct `ImportDataAction` +- New struct `IndexColumn` +- New struct `IntellectualProperty` +- New struct `KerberosCredentials` +- New struct `KerberosKeytabCredentials` +- New struct `KerberosKeytabSecrets` +- New struct `KerberosPasswordCredentials` +- New struct `KerberosPasswordSecrets` +- New struct `KeyVaultProperties` +- New struct `LabelCategory` +- New struct `LabelClass` +- New struct `LabelingDataConfiguration` +- New struct `LabelingJob` +- New struct `LabelingJobImageProperties` +- New struct `LabelingJobInstructions` +- New struct `LabelingJobProperties` +- New struct `LabelingJobResourceArmPaginatedResult` +- New struct `LabelingJobTextProperties` +- New struct `LakeHouseArtifact` +- New struct `MLAssistConfigurationDisabled` +- New struct `MLAssistConfigurationEnabled` +- New struct `ManagedComputeIdentity` +- New struct `ManagedNetworkProvisionOptions` +- New struct `ManagedNetworkProvisionStatus` +- New struct `ManagedNetworkSettings` +- New struct `MaterializationComputeResource` +- New struct `MaterializationSettings` +- New struct `ModelConfiguration` +- New struct `ModelPackageInput` +- New struct `ModelPerformanceSignal` +- New struct `MonitorDefinition` +- New struct `MonitorServerlessSparkCompute` +- New struct `MonitoringDataSegment` +- New struct `MonitoringTarget` +- New struct `MonitoringThreshold` +- New struct `MonitoringWorkspaceConnection` +- New struct `NlpFixedParameters` +- New struct `NlpParameterSubspace` +- New struct `NlpSweepSettings` +- New struct `NotificationSetting` +- New struct `NumericalDataDriftMetricThreshold` +- New struct `NumericalDataQualityMetricThreshold` +- New struct `NumericalPredictionDriftMetricThreshold` +- New struct `OneLakeDatastore` +- New struct `OnlineInferenceConfiguration` +- New struct `OperationDisplay` +- New struct `OutboundRuleBasicResource` +- New struct `OutboundRuleListResult` +- New struct `PackageInputPathID` +- New struct `PackageInputPathURL` +- New struct `PackageInputPathVersion` +- New struct `PackageRequest` +- New struct `PackageResponse` +- New struct `PartialJobBase` +- New struct `PartialJobBasePartialResource` +- New struct `PartialNotificationSetting` +- New struct `PartialRegistryPartialTrackedResource` +- New struct `PendingUploadRequestDto` +- New struct `PendingUploadResponseDto` +- New struct `PredictionDriftMonitoringSignal` +- New struct `PrivateEndpointDestination` +- New struct `PrivateEndpointOutboundRule` +- New struct `PrivateEndpointResource` +- New struct `ProgressMetrics` +- New struct `QueueSettings` +- New struct `Ray` +- New struct `Recurrence` +- New struct `Registry` +- New struct `RegistryPartialManagedServiceIdentity` +- New struct `RegistryPrivateEndpointConnection` +- New struct `RegistryPrivateEndpointConnectionProperties` +- New struct `RegistryPrivateLinkServiceConnectionState` +- New struct `RegistryProperties` +- New struct `RegistryRegionArmDetails` +- New struct `RegistryTrackedResourceArmPaginatedResult` +- New struct `RegressionModelPerformanceMetricThreshold` +- New struct 
`RequestLogging` +- New struct `SASCredentialDto` +- New struct `SecretConfiguration` +- New struct `ServicePrincipalAuthTypeWorkspaceConnectionProperties` +- New struct `ServiceTagDestination` +- New struct `ServiceTagOutboundRule` +- New struct `SparkJob` +- New struct `SparkJobPythonEntry` +- New struct `SparkJobScalaEntry` +- New struct `SparkResourceConfiguration` +- New struct `StaticInputData` +- New struct `StatusMessage` +- New struct `StorageAccountDetails` +- New struct `SystemCreatedAcrAccount` +- New struct `SystemCreatedStorageAccount` +- New struct `TableFixedParameters` +- New struct `TableParameterSubspace` +- New struct `TableSweepSettings` +- New struct `TmpfsOptions` +- New struct `TopNFeaturesByAttribution` +- New struct `TrailingInputData` +- New struct `TritonInferencingServer` +- New struct `UserCreatedAcrAccount` +- New struct `UserCreatedStorageAccount` +- New struct `VolumeDefinition` +- New struct `VolumeOptions` +- New struct `WorkspaceConnectionAPIKey` +- New struct `WorkspaceConnectionAccessKey` +- New struct `WorkspaceConnectionServicePrincipal` +- New struct `WorkspaceConnectionUpdateParameter` +- New struct `WorkspaceHubConfig` +- New struct `WorkspacePrivateEndpointResource` +- New field `Origin` in struct `AmlOperation` +- New field `AutoDeleteSetting` in struct `AssetBase` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct `AssetJobOutput` +- New field `NotificationSetting`, `QueueSettings`, `SecretsConfiguration` in struct `AutoMLJob` +- New field `IntellectualProperty`, `ResourceGroup`, `SubscriptionID` in struct `AzureBlobDatastore` +- New field `IntellectualProperty`, `ResourceGroup`, `SubscriptionID` in struct `AzureDataLakeGen1Datastore` +- New field `IntellectualProperty`, `ResourceGroup`, `SubscriptionID` in struct `AzureDataLakeGen2Datastore` +- New field `IntellectualProperty`, `ResourceGroup`, `SubscriptionID` in struct `AzureFileDatastore` +- New field `DeploymentConfiguration` in struct `BatchDeploymentProperties` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `Classification` +- New field `TrainingMode` in struct `ClassificationTrainingSettings` +- New field `ProvisioningState` in struct `CodeContainerProperties` +- New field `AutoDeleteSetting`, `ProvisioningState` in struct `CodeVersionProperties` +- New field `Hash`, `HashVersion` in struct `CodeVersionsClientListOptions` +- New field `AutologgerSettings`, `NotificationSetting`, `QueueSettings`, `SecretsConfiguration` in struct `CommandJob` +- New field `ProvisioningState` in struct `ComponentContainerProperties` +- New field `AutoDeleteSetting`, `ProvisioningState`, `Stage` in struct `ComponentVersionProperties` +- New field `Stage` in struct `ComponentVersionsClientListOptions` +- New field `AutologgerSettings`, `CustomServices`, `IdleTimeBeforeShutdown`, `OSImageMetadata` in struct `ComputeInstanceProperties` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct `CustomModelJobOutput` +- New field `Stage` in struct `DataVersionsClientListOptions` +- New field `Locations`, `MaxInstanceCount` in struct `DeploymentResourceConfiguration` +- New field `CosmosDbResourceID`, `SearchAccountResourceID`, `StorageAccountResourceID` in struct `EncryptionProperty` +- New field `ProvisioningState` in struct `EnvironmentContainerProperties` +- New field `AutoDeleteSetting`, `IntellectualProperty`, `ProvisioningState`, `Stage` in struct `EnvironmentVersionProperties` +- New field `Stage` in struct `EnvironmentVersionsClientListOptions` 
+- New field `Category`, `Endpoints` in struct `FQDNEndpoints` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `Forecasting` +- New field `FeaturesUnknownAtForecastTime` in struct `ForecastingSettings` +- New field `TrainingMode` in struct `ForecastingTrainingSettings` +- New field `LogTrainingMetrics`, `LogValidationLoss` in struct `ImageModelSettingsObjectDetection` +- New field `Locations`, `MaxInstanceCount` in struct `JobResourceConfiguration` +- New field `Nodes` in struct `JobService` +- New field `AssetName`, `ScheduleID`, `Scheduled` in struct `JobsClientListOptions` +- New field `DataCollector` in struct `KubernetesOnlineDeployment` +- New field `UserStorageArmID` in struct `ListWorkspaceKeysResult` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct `MLFlowModelJobOutput` +- New field `AutoDeleteSetting`, `IntellectualProperty`, `Stage` in struct `MLTableData` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct `MLTableJobOutput` +- New field `ExpiryTime`, `Metadata` in struct `ManagedIdentityAuthTypeWorkspaceConnectionProperties` +- New field `DataCollector` in struct `ManagedOnlineDeployment` +- New field `ProvisioningState` in struct `ModelContainerProperties` +- New field `AutoDeleteSetting`, `IntellectualProperty`, `ProvisioningState`, `Stage` in struct `ModelVersionProperties` +- New field `Stage` in struct `ModelVersionsClientListOptions` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `NlpVertical` +- New field `MaxNodes`, `TrialTimeout` in struct `NlpVerticalLimitSettings` +- New field `ExpiryTime`, `Metadata` in struct `NoneAuthTypeWorkspaceConnectionProperties` +- New field `IsPrivateLinkEnabled` in struct `NotebookResourceInfo` +- New field `DataCollector` in struct `OnlineDeploymentProperties` +- New field `MirrorTraffic` in struct `OnlineEndpointProperties` +- New field `ExpiryTime`, `Metadata` in struct `PATAuthTypeWorkspaceConnectionProperties` +- New field `NotificationSetting`, `SecretsConfiguration` in struct `PipelineJob` +- New field `Logbase` in struct `RandomSamplingAlgorithm` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `Regression` +- New field `TrainingMode` in struct `RegressionTrainingSettings` +- New field `Locations`, `MaxInstanceCount` in struct `ResourceConfiguration` +- New field `ExpiryTime`, `Metadata` in struct `SASAuthTypeWorkspaceConnectionProperties` +- New field `NotificationSetting`, `QueueSettings`, `SecretsConfiguration` in struct `SweepJob` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `TableVertical` +- New field `MaxNodes`, `SweepConcurrentTrials`, `SweepTrials` in struct `TableVerticalLimitSettings` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `TextClassification` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `TextClassificationMultilabel` +- New field `FixedParameters`, `SearchSpace`, `SweepSettings` in struct `TextNer` +- New field `TrainingMode` in struct `TrainingSettings` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct `TritonModelJobOutput` +- New field `AutoDeleteSetting`, `IntellectualProperty`, `Stage` in struct `URIFileDataVersion` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct `URIFileJobOutput` +- New field `AutoDeleteSetting`, `IntellectualProperty`, `Stage` in struct `URIFolderDataVersion` +- New field `AssetName`, `AssetVersion`, `AutoDeleteSetting` in struct 
`URIFolderJobOutput` +- New field `ExpiryTime`, `Metadata` in struct `UsernamePasswordAuthTypeWorkspaceConnectionProperties` +- New field `Kind` in struct `Workspace` +- New field `Body` in struct `WorkspaceConnectionsClientCreateOptions` +- New field `AssociatedWorkspaces`, `ContainerRegistries`, `EnableDataIsolation`, `ExistingWorkspaces`, `FeatureStoreSettings`, `HubResourceID`, `KeyVaults`, `ManagedNetwork`, `SoftDeleteRetentionInDays`, `StorageAccounts`, `SystemDatastoresAuthMode`, `WorkspaceHubConfig` in struct `WorkspaceProperties` +- New field `EnableDataIsolation`, `Encryption`, `FeatureStoreSettings`, `ManagedNetwork`, `SoftDeleteRetentionInDays`, `V1LegacyMode` in struct `WorkspacePropertiesUpdateParameters` +- New field `ForceToPurge` in struct `WorkspacesClientBeginDeleteOptions` +- New field `Body` in struct `WorkspacesClientBeginDiagnoseOptions` +- New field `Kind` in struct `WorkspacesClientListByResourceGroupOptions` +- New field `Kind` in struct `WorkspacesClientListBySubscriptionOptions` + + ## 3.1.1 (2023-04-14) ### Bug Fixes diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/autorest.md b/sdk/resourcemanager/machinelearning/armmachinelearning/autorest.md index 1ffc0d7ca04e..39f857ce766b 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/autorest.md +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/autorest.md @@ -5,8 +5,8 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/readme.go.md +- /mnt/vss/_work/1/s/azure-rest-api-specs/specification/machinelearningservices/resource-manager/readme.md +- /mnt/vss/_work/1/s/azure-rest-api-specs/specification/machinelearningservices/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 3.1.1 +module-version: 4.0.0-beta.1 ``` \ No newline at end of file diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client.go index 54f231135067..8f673c039162 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client.go @@ -48,7 +48,7 @@ func NewBatchDeploymentsClient(subscriptionID string, credential azcore.TokenCre // BeginCreateOrUpdate - Creates/updates a batch inference deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. 
// - endpointName - Inference endpoint name @@ -62,7 +62,9 @@ func (client *BatchDeploymentsClient) BeginCreateOrUpdate(ctx context.Context, r if err != nil { return nil, err } - return runtime.NewPoller[BatchDeploymentsClientCreateOrUpdateResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BatchDeploymentsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) } else { return runtime.NewPollerFromResumeToken[BatchDeploymentsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -71,7 +73,7 @@ func (client *BatchDeploymentsClient) BeginCreateOrUpdate(ctx context.Context, r // CreateOrUpdate - Creates/updates a batch inference deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *BatchDeploymentsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, deploymentName string, body BatchDeployment, options *BatchDeploymentsClientBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, deploymentName, body, options) if err != nil { @@ -115,7 +117,7 @@ func (client *BatchDeploymentsClient) createOrUpdateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -124,7 +126,7 @@ func (client *BatchDeploymentsClient) createOrUpdateCreateRequest(ctx context.Co // BeginDelete - Delete Batch Inference deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Endpoint name @@ -137,7 +139,9 @@ func (client *BatchDeploymentsClient) BeginDelete(ctx context.Context, resourceG if err != nil { return nil, err } - return runtime.NewPoller[BatchDeploymentsClientDeleteResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BatchDeploymentsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[BatchDeploymentsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -146,7 +150,7 @@ func (client *BatchDeploymentsClient) BeginDelete(ctx context.Context, resourceG // Delete - Delete Batch Inference deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *BatchDeploymentsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, deploymentName string, options *BatchDeploymentsClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, deploymentName, options) if err != nil { @@ -190,7 +194,7 @@ func (client *BatchDeploymentsClient) deleteCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -199,7 +203,7 @@ func (client *BatchDeploymentsClient) deleteCreateRequest(ctx context.Context, r // Get - Gets a batch inference deployment by id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Endpoint name @@ -248,7 +252,7 @@ func (client *BatchDeploymentsClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -265,7 +269,7 @@ func (client *BatchDeploymentsClient) getHandleResponse(resp *http.Response) (Ba // NewListPager - Lists Batch inference deployments in the workspace. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Endpoint name @@ -323,7 +327,7 @@ func (client *BatchDeploymentsClient) listCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.OrderBy != nil { reqQP.Set("$orderBy", *options.OrderBy) } @@ -350,7 +354,7 @@ func (client *BatchDeploymentsClient) listHandleResponse(resp *http.Response) (B // BeginUpdate - Update a batch inference deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name @@ -373,7 +377,7 @@ func (client *BatchDeploymentsClient) BeginUpdate(ctx context.Context, resourceG // Update - Update a batch inference deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *BatchDeploymentsClient) update(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, deploymentName string, body PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, options *BatchDeploymentsClientBeginUpdateOptions) (*http.Response, error) { req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, deploymentName, body, options) if err != nil { @@ -417,7 +421,7 @@ func (client *BatchDeploymentsClient) updateCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client_example_test.go deleted file mode 100644 index eca97d6c7d6b..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/batchdeployments_client_example_test.go +++ /dev/null @@ -1,495 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchDeployment/list.json -func ExampleBatchDeploymentsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewBatchDeploymentsClient().NewListPager("test-rg", "my-aml-workspace", "testEndpointName", &armmachinelearning.BatchDeploymentsClientListOptions{OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Skip: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.BatchDeploymentTrackedResourceArmPaginatedResult = armmachinelearning.BatchDeploymentTrackedResourceArmPaginatedResult{ - // Value: []*armmachinelearning.BatchDeployment{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchDeploymentProperties{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Compute: to.Ptr("string"), - // ErrorThreshold: to.Ptr[int32](1), - // LoggingLevel: to.Ptr(armmachinelearning.BatchLoggingLevelInfo), - // MaxConcurrencyPerInstance: to.Ptr[int32](1), - // MiniBatchSize: to.Ptr[int64](1), - // Model: &armmachinelearning.IDAssetReference{ - // ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID), - // AssetID: to.Ptr("string"), - // }, - // OutputAction: to.Ptr(armmachinelearning.BatchOutputActionSummaryOnly), - // OutputFileName: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // Resources: &armmachinelearning.DeploymentResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "a3c13e2e-a213-4cac-9f5a-b49966906ad6": nil, - // }, - // }, - // }, - // RetrySettings: &armmachinelearning.BatchRetrySettings{ - // MaxRetries: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchDeployment/delete.json -func ExampleBatchDeploymentsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - 
log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewBatchDeploymentsClient().BeginDelete(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchDeployment/get.json -func ExampleBatchDeploymentsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewBatchDeploymentsClient().Get(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.BatchDeployment = armmachinelearning.BatchDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchDeploymentProperties{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Compute: to.Ptr("string"), - // 
ErrorThreshold: to.Ptr[int32](1), - // LoggingLevel: to.Ptr(armmachinelearning.BatchLoggingLevelInfo), - // MaxConcurrencyPerInstance: to.Ptr[int32](1), - // MiniBatchSize: to.Ptr[int64](1), - // Model: &armmachinelearning.IDAssetReference{ - // ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID), - // AssetID: to.Ptr("string"), - // }, - // OutputAction: to.Ptr(armmachinelearning.BatchOutputActionSummaryOnly), - // OutputFileName: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // Resources: &armmachinelearning.DeploymentResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "843c2bb4-e5f1-4267-98c8-ba22a99dbb00": nil, - // }, - // }, - // }, - // RetrySettings: &armmachinelearning.BatchRetrySettings{ - // MaxRetries: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchDeployment/update.json -func ExampleBatchDeploymentsClient_BeginUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewBatchDeploymentsClient().BeginUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", armmachinelearning.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties{ - Properties: &armmachinelearning.PartialBatchDeployment{ - Description: to.Ptr("string"), - }, - Tags: map[string]*string{}, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.BatchDeployment = armmachinelearning.BatchDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchDeploymentProperties{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Compute: to.Ptr("string"), - // ErrorThreshold: to.Ptr[int32](1), - // LoggingLevel: to.Ptr(armmachinelearning.BatchLoggingLevelInfo), - // MaxConcurrencyPerInstance: to.Ptr[int32](1), - // MiniBatchSize: to.Ptr[int64](1), - // Model: &armmachinelearning.IDAssetReference{ - // ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID), - // AssetID: to.Ptr("string"), - // }, - // OutputAction: to.Ptr(armmachinelearning.BatchOutputActionSummaryOnly), - // OutputFileName: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // Resources: &armmachinelearning.DeploymentResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "1e5e1cf9-b0ea-4cf6-9764-e750bf85c10a": nil, - // }, - // }, - // }, - // RetrySettings: &armmachinelearning.BatchRetrySettings{ - // MaxRetries: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchDeployment/createOrUpdate.json -func ExampleBatchDeploymentsClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := 
armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewBatchDeploymentsClient().BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", armmachinelearning.BatchDeployment{ - Location: to.Ptr("string"), - Tags: map[string]*string{}, - Identity: &armmachinelearning.ManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - "string": {}, - }, - }, - Kind: to.Ptr("string"), - Properties: &armmachinelearning.BatchDeploymentProperties{ - Description: to.Ptr("string"), - CodeConfiguration: &armmachinelearning.CodeConfiguration{ - CodeID: to.Ptr("string"), - ScoringScript: to.Ptr("string"), - }, - EnvironmentID: to.Ptr("string"), - EnvironmentVariables: map[string]*string{ - "string": to.Ptr("string"), - }, - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Compute: to.Ptr("string"), - ErrorThreshold: to.Ptr[int32](1), - LoggingLevel: to.Ptr(armmachinelearning.BatchLoggingLevelInfo), - MaxConcurrencyPerInstance: to.Ptr[int32](1), - MiniBatchSize: to.Ptr[int64](1), - Model: &armmachinelearning.IDAssetReference{ - ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID), - AssetID: to.Ptr("string"), - }, - OutputAction: to.Ptr(armmachinelearning.BatchOutputActionSummaryOnly), - OutputFileName: to.Ptr("string"), - Resources: &armmachinelearning.DeploymentResourceConfiguration{ - InstanceCount: to.Ptr[int32](1), - InstanceType: to.Ptr("string"), - Properties: map[string]any{ - "string": map[string]any{ - "cd3c37dc-2876-4ca4-8a54-21bd7619724a": nil, - }, - }, - }, - RetrySettings: &armmachinelearning.BatchRetrySettings{ - MaxRetries: to.Ptr[int32](1), - Timeout: to.Ptr("PT5M"), - }, - }, - SKU: &armmachinelearning.SKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.BatchDeployment = armmachinelearning.BatchDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchDeploymentProperties{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Compute: to.Ptr("string"), - // ErrorThreshold: to.Ptr[int32](1), - // LoggingLevel: to.Ptr(armmachinelearning.BatchLoggingLevelInfo), - // MaxConcurrencyPerInstance: to.Ptr[int32](1), - // MiniBatchSize: to.Ptr[int64](1), - // Model: &armmachinelearning.IDAssetReference{ - // ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID), - // AssetID: to.Ptr("string"), - // }, - // OutputAction: to.Ptr(armmachinelearning.BatchOutputActionSummaryOnly), - // OutputFileName: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // Resources: &armmachinelearning.DeploymentResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "4939850d-8eae-4343-8566-0826259a2ad1": nil, - // }, - // }, - // }, - // RetrySettings: &armmachinelearning.BatchRetrySettings{ - // MaxRetries: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client.go index 58214338fdb4..c94dac86c04f 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client.go @@ -48,7 +48,7 @@ func NewBatchEndpointsClient(subscriptionID string, credential azcore.TokenCrede // BeginCreateOrUpdate - Creates a batch inference endpoint 
(asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Name for the Batch inference endpoint. @@ -61,7 +61,9 @@ func (client *BatchEndpointsClient) BeginCreateOrUpdate(ctx context.Context, res if err != nil { return nil, err } - return runtime.NewPoller[BatchEndpointsClientCreateOrUpdateResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BatchEndpointsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) } else { return runtime.NewPollerFromResumeToken[BatchEndpointsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -70,7 +72,7 @@ func (client *BatchEndpointsClient) BeginCreateOrUpdate(ctx context.Context, res // CreateOrUpdate - Creates a batch inference endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *BatchEndpointsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, body BatchEndpoint, options *BatchEndpointsClientBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, body, options) if err != nil { @@ -110,7 +112,7 @@ func (client *BatchEndpointsClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -119,7 +121,7 @@ func (client *BatchEndpointsClient) createOrUpdateCreateRequest(ctx context.Cont // BeginDelete - Delete Batch Inference Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference Endpoint name. @@ -131,7 +133,9 @@ func (client *BatchEndpointsClient) BeginDelete(ctx context.Context, resourceGro if err != nil { return nil, err } - return runtime.NewPoller[BatchEndpointsClientDeleteResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BatchEndpointsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[BatchEndpointsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -140,7 +144,7 @@ func (client *BatchEndpointsClient) BeginDelete(ctx context.Context, resourceGro // Delete - Delete Batch Inference Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *BatchEndpointsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, options *BatchEndpointsClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, options) if err != nil { @@ -180,7 +184,7 @@ func (client *BatchEndpointsClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -189,7 +193,7 @@ func (client *BatchEndpointsClient) deleteCreateRequest(ctx context.Context, res // Get - Gets a batch inference endpoint by name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Name for the Batch Endpoint. @@ -233,7 +237,7 @@ func (client *BatchEndpointsClient) getCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -250,7 +254,7 @@ func (client *BatchEndpointsClient) getHandleResponse(resp *http.Response) (Batc // NewListPager - Lists Batch inference endpoint in the workspace. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - BatchEndpointsClientListOptions contains the optional parameters for the BatchEndpointsClient.NewListPager method. @@ -302,7 +306,7 @@ func (client *BatchEndpointsClient) listCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Count != nil { reqQP.Set("count", strconv.FormatInt(int64(*options.Count), 10)) } @@ -326,7 +330,7 @@ func (client *BatchEndpointsClient) listHandleResponse(resp *http.Response) (Bat // ListKeys - Lists batch Inference Endpoint keys. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference Endpoint name. 
@@ -370,7 +374,7 @@ func (client *BatchEndpointsClient) listKeysCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -388,7 +392,7 @@ func (client *BatchEndpointsClient) listKeysHandleResponse(resp *http.Response) // BeginUpdate - Update a batch inference endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Name for the Batch inference endpoint. @@ -410,7 +414,7 @@ func (client *BatchEndpointsClient) BeginUpdate(ctx context.Context, resourceGro // Update - Update a batch inference endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *BatchEndpointsClient) update(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, body PartialMinimalTrackedResourceWithIdentity, options *BatchEndpointsClientBeginUpdateOptions) (*http.Response, error) { req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, body, options) if err != nil { @@ -450,7 +454,7 @@ func (client *BatchEndpointsClient) updateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client_example_test.go deleted file mode 100644 index f8c6b99dc25b..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/batchendpoints_client_example_test.go +++ /dev/null @@ -1,387 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchEndpoint/list.json -func ExampleBatchEndpointsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewBatchEndpointsClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.BatchEndpointsClientListOptions{Count: to.Ptr[int32](1), - Skip: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.BatchEndpointTrackedResourceArmPaginatedResult = armmachinelearning.BatchEndpointTrackedResourceArmPaginatedResult{ - // Value: []*armmachinelearning.BatchEndpoint{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Defaults: &armmachinelearning.BatchEndpointDefaults{ - // DeploymentName: to.Ptr("string"), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // 
Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchEndpoint/delete.json -func ExampleBatchEndpointsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewBatchEndpointsClient().BeginDelete(ctx, "resourceGroup-1234", "testworkspace", "testBatchEndpoint", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchEndpoint/get.json -func ExampleBatchEndpointsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewBatchEndpointsClient().Get(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.BatchEndpoint = armmachinelearning.BatchEndpoint{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Defaults: &armmachinelearning.BatchEndpointDefaults{ - // DeploymentName: to.Ptr("string"), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchEndpoint/update.json -func ExampleBatchEndpointsClient_BeginUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewBatchEndpointsClient().BeginUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", armmachinelearning.PartialMinimalTrackedResourceWithIdentity{ - Tags: map[string]*string{}, - Identity: &armmachinelearning.PartialManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]any{ - "string": map[string]any{}, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. - // res.BatchEndpoint = armmachinelearning.BatchEndpoint{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Defaults: &armmachinelearning.BatchEndpointDefaults{ - // DeploymentName: to.Ptr("string"), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.EndpointProvisioningStateSucceeded), - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchEndpoint/createOrUpdate.json -func ExampleBatchEndpointsClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewBatchEndpointsClient().BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", armmachinelearning.BatchEndpoint{ - Location: to.Ptr("string"), - Tags: map[string]*string{}, - Identity: &armmachinelearning.ManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - "string": {}, - }, - }, - Kind: to.Ptr("string"), - Properties: &armmachinelearning.BatchEndpointProperties{ - Description: to.Ptr("string"), - AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - Properties: map[string]*string{ - "string": 
to.Ptr("string"), - }, - Defaults: &armmachinelearning.BatchEndpointDefaults{ - DeploymentName: to.Ptr("string"), - }, - }, - SKU: &armmachinelearning.SKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.BatchEndpoint = armmachinelearning.BatchEndpoint{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.BatchEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Defaults: &armmachinelearning.BatchEndpointDefaults{ - // DeploymentName: to.Ptr("string"), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.EndpointProvisioningStateSucceeded), - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/BatchEndpoint/listKeys.json -func ExampleBatchEndpointsClient_ListKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create 
client: %v", err) - } - res, err := clientFactory.NewBatchEndpointsClient().ListKeys(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.EndpointAuthKeys = armmachinelearning.EndpointAuthKeys{ - // PrimaryKey: to.Ptr("string"), - // SecondaryKey: to.Ptr("string"), - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/client_factory.go b/sdk/resourcemanager/machinelearning/armmachinelearning/client_factory.go index ec015c4f692f..0c3e23262b25 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/client_factory.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/client_factory.go @@ -38,16 +38,6 @@ func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, }, nil } -func (c *ClientFactory) NewOperationsClient() *OperationsClient { - subClient, _ := NewOperationsClient(c.credential, c.options) - return subClient -} - -func (c *ClientFactory) NewWorkspacesClient() *WorkspacesClient { - subClient, _ := NewWorkspacesClient(c.subscriptionID, c.credential, c.options) - return subClient -} - func (c *ClientFactory) NewUsagesClient() *UsagesClient { subClient, _ := NewUsagesClient(c.subscriptionID, c.credential, c.options) return subClient @@ -68,18 +58,53 @@ func (c *ClientFactory) NewComputeClient() *ComputeClient { return subClient } -func (c *ClientFactory) NewPrivateEndpointConnectionsClient() *PrivateEndpointConnectionsClient { - subClient, _ := NewPrivateEndpointConnectionsClient(c.subscriptionID, c.credential, c.options) +func (c *ClientFactory) NewRegistryCodeContainersClient() *RegistryCodeContainersClient { + subClient, _ := NewRegistryCodeContainersClient(c.subscriptionID, c.credential, c.options) return subClient } -func (c *ClientFactory) NewPrivateLinkResourcesClient() *PrivateLinkResourcesClient { - subClient, _ := NewPrivateLinkResourcesClient(c.subscriptionID, c.credential, c.options) +func (c *ClientFactory) NewRegistryCodeVersionsClient() *RegistryCodeVersionsClient { + subClient, _ := NewRegistryCodeVersionsClient(c.subscriptionID, c.credential, c.options) return subClient } -func (c *ClientFactory) NewWorkspaceConnectionsClient() *WorkspaceConnectionsClient { - subClient, _ := NewWorkspaceConnectionsClient(c.subscriptionID, c.credential, c.options) +func (c *ClientFactory) NewRegistryComponentContainersClient() *RegistryComponentContainersClient { + subClient, _ := NewRegistryComponentContainersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewRegistryComponentVersionsClient() *RegistryComponentVersionsClient { + subClient, _ := NewRegistryComponentVersionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewRegistryDataContainersClient() *RegistryDataContainersClient { + subClient, _ := NewRegistryDataContainersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewRegistryDataVersionsClient() *RegistryDataVersionsClient { + subClient, _ := NewRegistryDataVersionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) 
NewRegistryEnvironmentContainersClient() *RegistryEnvironmentContainersClient { + subClient, _ := NewRegistryEnvironmentContainersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewRegistryEnvironmentVersionsClient() *RegistryEnvironmentVersionsClient { + subClient, _ := NewRegistryEnvironmentVersionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewRegistryModelContainersClient() *RegistryModelContainersClient { + subClient, _ := NewRegistryModelContainersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewRegistryModelVersionsClient() *RegistryModelVersionsClient { + subClient, _ := NewRegistryModelVersionsClient(c.subscriptionID, c.credential, c.options) return subClient } @@ -138,11 +163,41 @@ func (c *ClientFactory) NewEnvironmentVersionsClient() *EnvironmentVersionsClien return subClient } +func (c *ClientFactory) NewFeaturesetContainersClient() *FeaturesetContainersClient { + subClient, _ := NewFeaturesetContainersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFeaturesClient() *FeaturesClient { + subClient, _ := NewFeaturesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFeaturesetVersionsClient() *FeaturesetVersionsClient { + subClient, _ := NewFeaturesetVersionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFeaturestoreEntityContainersClient() *FeaturestoreEntityContainersClient { + subClient, _ := NewFeaturestoreEntityContainersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFeaturestoreEntityVersionsClient() *FeaturestoreEntityVersionsClient { + subClient, _ := NewFeaturestoreEntityVersionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + func (c *ClientFactory) NewJobsClient() *JobsClient { subClient, _ := NewJobsClient(c.subscriptionID, c.credential, c.options) return subClient } +func (c *ClientFactory) NewLabelingJobsClient() *LabelingJobsClient { + subClient, _ := NewLabelingJobsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + func (c *ClientFactory) NewModelContainersClient() *ModelContainersClient { subClient, _ := NewModelContainersClient(c.subscriptionID, c.credential, c.options) return subClient @@ -168,7 +223,47 @@ func (c *ClientFactory) NewSchedulesClient() *SchedulesClient { return subClient } +func (c *ClientFactory) NewRegistriesClient() *RegistriesClient { + subClient, _ := NewRegistriesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + func (c *ClientFactory) NewWorkspaceFeaturesClient() *WorkspaceFeaturesClient { subClient, _ := NewWorkspaceFeaturesClient(c.subscriptionID, c.credential, c.options) return subClient } + +func (c *ClientFactory) NewOperationsClient() *OperationsClient { + subClient, _ := NewOperationsClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewWorkspacesClient() *WorkspacesClient { + subClient, _ := NewWorkspacesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewWorkspaceConnectionsClient() *WorkspaceConnectionsClient { + subClient, _ := NewWorkspaceConnectionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewManagedNetworkSettingsRuleClient() 
*ManagedNetworkSettingsRuleClient { + subClient, _ := NewManagedNetworkSettingsRuleClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewPrivateEndpointConnectionsClient() *PrivateEndpointConnectionsClient { + subClient, _ := NewPrivateEndpointConnectionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewPrivateLinkResourcesClient() *PrivateLinkResourcesClient { + subClient, _ := NewPrivateLinkResourcesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewManagedNetworkProvisionsClient() *ManagedNetworkProvisionsClient { + subClient, _ := NewManagedNetworkProvisionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client.go index aa14d3e59348..300ffe0e9285 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client.go @@ -47,7 +47,7 @@ func NewCodeContainersClient(subscriptionID string, credential azcore.TokenCrede // CreateOrUpdate - Create or update container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -93,7 +93,7 @@ func (client *CodeContainersClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -111,7 +111,7 @@ func (client *CodeContainersClient) createOrUpdateHandleResponse(resp *http.Resp // Delete - Delete container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -155,7 +155,7 @@ func (client *CodeContainersClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -164,7 +164,7 @@ func (client *CodeContainersClient) deleteCreateRequest(ctx context.Context, res // Get - Get container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. 
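The hunks above and below switch every generated request constructor in this client from api-version 2022-10-01 to 2023-06-01-preview. For callers that need to keep requests pinned to a different service version, azcore exposes an APIVersion override on the client options. A minimal sketch, assuming the 4.0.0-beta.1 module is published under the /v4 import path and that the Machine Learning resource provider still accepts the older version (the subscription ID is a placeholder):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	// APIVersion overrides the api-version query parameter set by the generated
	// request constructors; whether the service accepts the value is up to the
	// resource provider, so treat this as an escape hatch, not a guarantee.
	opts := &arm.ClientOptions{
		ClientOptions: policy.ClientOptions{APIVersion: "2022-10-01"},
	}
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, opts)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	_ = clientFactory.NewCodeContainersClient()
}
```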
@@ -208,7 +208,7 @@ func (client *CodeContainersClient) getCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *CodeContainersClient) getHandleResponse(resp *http.Response) (Code // NewListPager - List containers. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - CodeContainersClientListOptions contains the optional parameters for the CodeContainersClient.NewListPager method. @@ -277,7 +277,7 @@ func (client *CodeContainersClient) listCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client_example_test.go deleted file mode 100644 index 101195c5fa81..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/codecontainers_client_example_test.go +++ /dev/null @@ -1,192 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeContainer/list.json -func ExampleCodeContainersClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewCodeContainersClient().NewListPager("testrg123", "testworkspace", &armmachinelearning.CodeContainersClientListOptions{Skip: nil}) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.CodeContainerResourceArmPaginatedResult = armmachinelearning.CodeContainerResourceArmPaginatedResult{ - // Value: []*armmachinelearning.CodeContainer{ - // { - // Name: to.Ptr("testContainer"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/codes"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/codes/testContainer"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-08-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-08-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeContainerProperties{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "property1": to.Ptr("string"), - // "property2": to.Ptr("string"), - // }, - // }, - // }, - // { - // Name: to.Ptr("testContainer2"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/codes"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/codes/testContainer2"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-08-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-08-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeContainerProperties{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "property1": to.Ptr("string"), - // "property2": to.Ptr("string"), - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeContainer/delete.json -func ExampleCodeContainersClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewCodeContainersClient().Delete(ctx, "testrg123", "testworkspace", "testContainer", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeContainer/get.json -func ExampleCodeContainersClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - 
ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewCodeContainersClient().Get(ctx, "testrg123", "testworkspace", "testContainer", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.CodeContainer = armmachinelearning.CodeContainer{ - // Name: to.Ptr("testContainer"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/codes"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/codes/testContainer"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-08-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeContainerProperties{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "property1": to.Ptr("string"), - // "property2": to.Ptr("string"), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeContainer/createOrUpdate.json -func ExampleCodeContainersClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewCodeContainersClient().CreateOrUpdate(ctx, "testrg123", "testworkspace", "testContainer", armmachinelearning.CodeContainer{ - Properties: &armmachinelearning.CodeContainerProperties{ - Description: to.Ptr("string"), - Tags: map[string]*string{ - "tag1": to.Ptr("value1"), - "tag2": to.Ptr("value2"), - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.CodeContainer = armmachinelearning.CodeContainer{ - // Name: to.Ptr("testContainer"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/codes"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/codes/testContainer"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeContainerProperties{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "property1": to.Ptr("string"), - // "property2": to.Ptr("string"), - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client.go index be9271d8a299..9c5d5234381e 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client.go @@ -45,10 +45,79 @@ func NewCodeVersionsClient(subscriptionID string, credential azcore.TokenCredent return client, nil } +// CreateOrGetStartPendingUpload - Generate a storage location and credential for the client to upload a code asset to. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Pending upload request object +// - options - CodeVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the CodeVersionsClient.CreateOrGetStartPendingUpload +// method. +func (client *CodeVersionsClient) CreateOrGetStartPendingUpload(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body PendingUploadRequestDto, options *CodeVersionsClientCreateOrGetStartPendingUploadOptions) (CodeVersionsClientCreateOrGetStartPendingUploadResponse, error) { + req, err := client.createOrGetStartPendingUploadCreateRequest(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return CodeVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CodeVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CodeVersionsClientCreateOrGetStartPendingUploadResponse{}, runtime.NewResponseError(resp) + } + return client.createOrGetStartPendingUploadHandleResponse(resp) +} + +// createOrGetStartPendingUploadCreateRequest creates the CreateOrGetStartPendingUpload request. 
+func (client *CodeVersionsClient) createOrGetStartPendingUploadCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body PendingUploadRequestDto, options *CodeVersionsClientCreateOrGetStartPendingUploadOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}/startPendingUpload" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// createOrGetStartPendingUploadHandleResponse handles the CreateOrGetStartPendingUpload response. +func (client *CodeVersionsClient) createOrGetStartPendingUploadHandleResponse(resp *http.Response) (CodeVersionsClientCreateOrGetStartPendingUploadResponse, error) { + result := CodeVersionsClientCreateOrGetStartPendingUploadResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PendingUploadResponseDto); err != nil { + return CodeVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + return result, nil +} + // CreateOrUpdate - Create or update version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -99,7 +168,7 @@ func (client *CodeVersionsClient) createOrUpdateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -117,7 +186,7 @@ func (client *CodeVersionsClient) createOrUpdateHandleResponse(resp *http.Respon // Delete - Delete version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -166,7 +235,7 @@ func (client *CodeVersionsClient) deleteCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -175,7 +244,7 @@ func (client *CodeVersionsClient) deleteCreateRequest(ctx context.Context, resou // Get - Get version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -224,7 +293,7 @@ func (client *CodeVersionsClient) getCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -241,7 +310,7 @@ func (client *CodeVersionsClient) getHandleResponse(resp *http.Response) (CodeVe // NewListPager - List versions. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -298,7 +367,7 @@ func (client *CodeVersionsClient) listCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.OrderBy != nil { reqQP.Set("$orderBy", *options.OrderBy) } @@ -308,6 +377,12 @@ func (client *CodeVersionsClient) listCreateRequest(ctx context.Context, resourc if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } + if options != nil && options.Hash != nil { + reqQP.Set("hash", *options.Hash) + } + if options != nil && options.HashVersion != nil { + reqQP.Set("hashVersion", *options.HashVersion) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client_example_test.go deleted file mode 100644 index b6361426d039..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/codeversions_client_example_test.go +++ /dev/null @@ -1,191 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
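codeversions_client.go above gains CreateOrGetStartPendingUpload, which returns a storage location and credential for uploading a code asset before the version is registered. A minimal sketch of calling it, assuming the /v4 import path; the resource group, workspace, and asset names are placeholders, and an empty PendingUploadRequestDto is passed because the body's optional fields are not shown in this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The method POSTs to .../codes/{name}/versions/{version}/startPendingUpload and
	// unmarshals the reply into PendingUploadResponseDto on the response type.
	res, err := clientFactory.NewCodeVersionsClient().CreateOrGetStartPendingUpload(ctx,
		"test-rg", "my-aml-workspace", "my-code", "1",
		armmachinelearning.PendingUploadRequestDto{}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	_ = res.PendingUploadResponseDto
}
```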
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeVersion/list.json -func ExampleCodeVersionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewCodeVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string", &armmachinelearning.CodeVersionsClientListOptions{OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Skip: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.CodeVersionResourceArmPaginatedResult = armmachinelearning.CodeVersionResourceArmPaginatedResult{ - // Value: []*armmachinelearning.CodeVersion{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // CodeURI: to.Ptr("https://blobStorage/folderName"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeVersion/delete.json -func ExampleCodeVersionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewCodeVersionsClient().Delete(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated 
from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeVersion/get.json -func ExampleCodeVersionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewCodeVersionsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.CodeVersion = armmachinelearning.CodeVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // CodeURI: to.Ptr("https://blobStorage/folderName"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/CodeVersion/createOrUpdate.json -func ExampleCodeVersionsClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewCodeVersionsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", "string", armmachinelearning.CodeVersion{ - Properties: &armmachinelearning.CodeVersionProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - IsAnonymous: to.Ptr(false), - CodeURI: to.Ptr("https://blobStorage/folderName"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. - // res.CodeVersion = armmachinelearning.CodeVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CodeVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // CodeURI: to.Ptr("https://blobStorage/folderName"), - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client.go index 744dfadbd872..500b9e517d22 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client.go @@ -47,7 +47,7 @@ func NewComponentContainersClient(subscriptionID string, credential azcore.Token // CreateOrUpdate - Create or update container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -93,7 +93,7 @@ func (client *ComponentContainersClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -111,7 +111,7 @@ func (client *ComponentContainersClient) createOrUpdateHandleResponse(resp *http // Delete - Delete container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -156,7 +156,7 @@ func (client *ComponentContainersClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -165,7 +165,7 @@ func (client *ComponentContainersClient) deleteCreateRequest(ctx context.Context // Get - Get container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. 
The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -209,7 +209,7 @@ func (client *ComponentContainersClient) getCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -226,7 +226,7 @@ func (client *ComponentContainersClient) getHandleResponse(resp *http.Response) // NewListPager - List component containers. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - ComponentContainersClientListOptions contains the optional parameters for the ComponentContainersClient.NewListPager @@ -279,7 +279,7 @@ func (client *ComponentContainersClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client_example_test.go deleted file mode 100644 index 1c8a7547ad1b..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/componentcontainers_client_example_test.go +++ /dev/null @@ -1,182 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentContainer/list.json -func ExampleComponentContainersClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewComponentContainersClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.ComponentContainersClientListOptions{Skip: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. 
- _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.ComponentContainerResourceArmPaginatedResult = armmachinelearning.ComponentContainerResourceArmPaginatedResult{ - // Value: []*armmachinelearning.ComponentContainer{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ComponentContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentContainer/delete.json -func ExampleComponentContainersClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewComponentContainersClient().Delete(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentContainer/get.json -func ExampleComponentContainersClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComponentContainersClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComponentContainer = armmachinelearning.ComponentContainer{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ComponentContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentContainer/createOrUpdate.json -func ExampleComponentContainersClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComponentContainersClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.ComponentContainer{ - Properties: &armmachinelearning.ComponentContainerProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComponentContainer = armmachinelearning.ComponentContainer{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ComponentContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client.go index 1a6e41534034..bfc599c8aed2 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client.go @@ -48,7 +48,7 @@ func NewComponentVersionsClient(subscriptionID string, credential azcore.TokenCr // CreateOrUpdate - Create or update version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -99,7 +99,7 @@ func (client *ComponentVersionsClient) createOrUpdateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -117,7 +117,7 @@ func (client *ComponentVersionsClient) createOrUpdateHandleResponse(resp *http.R // Delete - Delete version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -167,7 +167,7 @@ func (client *ComponentVersionsClient) deleteCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -176,7 +176,7 @@ func (client *ComponentVersionsClient) deleteCreateRequest(ctx context.Context, // Get - Get version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. 
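The componentversions_client.go hunks below make the same api-version switch and additionally wire an optional stage filter into the list request. A minimal sketch of paging component versions with that filter, assuming the /v4 import path and that the regenerated ComponentVersionsClientListOptions exposes the Stage field the hunk reads; the resource names and the "Production" stage value are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Stage is sent as the "stage" query parameter when set; leaving it nil omits the filter.
	pager := clientFactory.NewComponentVersionsClient().NewListPager("test-rg", "my-aml-workspace", "my-component",
		&armmachinelearning.ComponentVersionsClientListOptions{Stage: to.Ptr("Production")})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, v := range page.Value {
			_ = v // inspect each component version here
		}
	}
}
```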
@@ -225,7 +225,7 @@ func (client *ComponentVersionsClient) getCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -242,7 +242,7 @@ func (client *ComponentVersionsClient) getHandleResponse(resp *http.Response) (C // NewListPager - List component versions. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Component name. @@ -300,7 +300,7 @@ func (client *ComponentVersionsClient) listCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.OrderBy != nil { reqQP.Set("$orderBy", *options.OrderBy) } @@ -313,6 +313,9 @@ func (client *ComponentVersionsClient) listCreateRequest(ctx context.Context, re if options != nil && options.ListViewType != nil { reqQP.Set("listViewType", string(*options.ListViewType)) } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client_example_test.go deleted file mode 100644 index 0390560080e3..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/componentversions_client_example_test.go +++ /dev/null @@ -1,200 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
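Editor's note: the hunk above adds an optional `Stage` field to `ComponentVersionsClientListOptions`, forwarded as the `stage` query parameter by `listCreateRequest`. The following is a minimal sketch of how a caller might use it, modeled on the generated examples removed below; the stage value, resource names, subscription ID, and module major version (shown as v3, which may differ for the preview release) are placeholders, not values taken from this change.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// List component versions, filtered by the new optional "stage" query parameter.
	pager := clientFactory.NewComponentVersionsClient().NewListPager("test-rg", "my-aml-workspace", "my-component",
		&armmachinelearning.ComponentVersionsClientListOptions{
			Stage: to.Ptr("Production"), // hypothetical stage value
		})
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, v := range page.Value {
			_ = v // inspect each ComponentVersion here
		}
	}
}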
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentVersion/list.json -func ExampleComponentVersionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewComponentVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string", &armmachinelearning.ComponentVersionsClientListOptions{OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Skip: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.ComponentVersionResourceArmPaginatedResult = armmachinelearning.ComponentVersionResourceArmPaginatedResult{ - // Value: []*armmachinelearning.ComponentVersion{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ComponentVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // ComponentSpec: map[string]any{ - // "50acbce5-cccc-475a-8ac6-c4da402afbd8": nil, - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentVersion/delete.json -func ExampleComponentVersionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewComponentVersionsClient().Delete(ctx, "test-rg", "my-aml-workspace", 
"string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentVersion/get.json -func ExampleComponentVersionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComponentVersionsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComponentVersion = armmachinelearning.ComponentVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ComponentVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // ComponentSpec: map[string]any{ - // "1a7c40b5-2029-4f5f-a8d6-fd0822038773": nil, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ComponentVersion/createOrUpdate.json -func ExampleComponentVersionsClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComponentVersionsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", "string", armmachinelearning.ComponentVersion{ - Properties: &armmachinelearning.ComponentVersionProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - IsAnonymous: to.Ptr(false), - ComponentSpec: map[string]any{ - "8ced901b-d826-477d-bfef-329da9672513": nil, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // 
You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComponentVersion = armmachinelearning.ComponentVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ComponentVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // ComponentSpec: map[string]any{ - // "2de2e74e-457d-4447-a581-933abc2b9d96": nil, - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client.go index e3e00bdf264d..c0ddd771108f 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client.go @@ -49,7 +49,7 @@ func NewComputeClient(subscriptionID string, credential azcore.TokenCredential, // exist yet. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -73,7 +73,7 @@ func (client *ComputeClient) BeginCreateOrUpdate(ctx context.Context, resourceGr // exist yet. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *ComputeClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters ComputeResource, options *ComputeClientBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, computeName, parameters, options) if err != nil { @@ -113,7 +113,7 @@ func (client *ComputeClient) createOrUpdateCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, parameters) @@ -122,7 +122,7 @@ func (client *ComputeClient) createOrUpdateCreateRequest(ctx context.Context, re // BeginDelete - Deletes specified Machine Learning compute. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -144,7 +144,7 @@ func (client *ComputeClient) BeginDelete(ctx context.Context, resourceGroupName // Delete - Deletes specified Machine Learning compute. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *ComputeClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, underlyingResourceAction UnderlyingResourceAction, options *ComputeClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, computeName, underlyingResourceAction, options) if err != nil { @@ -184,7 +184,7 @@ func (client *ComputeClient) deleteCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") reqQP.Set("underlyingResourceAction", string(underlyingResourceAction)) req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -195,7 +195,7 @@ func (client *ComputeClient) deleteCreateRequest(ctx context.Context, resourceGr // 'keys' nested resource to get them. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -239,7 +239,7 @@ func (client *ComputeClient) getCreateRequest(ctx context.Context, resourceGroup return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -256,7 +256,7 @@ func (client *ComputeClient) getHandleResponse(resp *http.Response) (ComputeClie // NewListPager - Gets computes in specified workspace. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - ComputeClientListOptions contains the optional parameters for the ComputeClient.NewListPager method. @@ -308,7 +308,7 @@ func (client *ComputeClient) listCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } @@ -329,7 +329,7 @@ func (client *ComputeClient) listHandleResponse(resp *http.Response) (ComputeCli // ListKeys - Gets secrets related to Machine Learning compute (storage keys, service credentials, etc). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -373,7 +373,7 @@ func (client *ComputeClient) listKeysCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -390,7 +390,7 @@ func (client *ComputeClient) listKeysHandleResponse(resp *http.Response) (Comput // NewListNodesPager - Get the details (e.g IP address, port etc) of all the compute nodes in the compute. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -447,7 +447,7 @@ func (client *ComputeClient) listNodesCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -465,7 +465,7 @@ func (client *ComputeClient) listNodesHandleResponse(resp *http.Response) (Compu // BeginRestart - Posts a restart action to a compute instance // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -485,7 +485,7 @@ func (client *ComputeClient) BeginRestart(ctx context.Context, resourceGroupName // Restart - Posts a restart action to a compute instance // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *ComputeClient) restart(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, options *ComputeClientBeginRestartOptions) (*http.Response, error) { req, err := client.restartCreateRequest(ctx, resourceGroupName, workspaceName, computeName, options) if err != nil { @@ -525,7 +525,7 @@ func (client *ComputeClient) restartCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -534,7 +534,7 @@ func (client *ComputeClient) restartCreateRequest(ctx context.Context, resourceG // BeginStart - Posts a start action to a compute instance // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. 
// - computeName - Name of the Azure Machine Learning compute. @@ -554,7 +554,7 @@ func (client *ComputeClient) BeginStart(ctx context.Context, resourceGroupName s // Start - Posts a start action to a compute instance // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *ComputeClient) start(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, options *ComputeClientBeginStartOptions) (*http.Response, error) { req, err := client.startCreateRequest(ctx, resourceGroupName, workspaceName, computeName, options) if err != nil { @@ -594,7 +594,7 @@ func (client *ComputeClient) startCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -603,7 +603,7 @@ func (client *ComputeClient) startCreateRequest(ctx context.Context, resourceGro // BeginStop - Posts a stop action to a compute instance // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -623,7 +623,7 @@ func (client *ComputeClient) BeginStop(ctx context.Context, resourceGroupName st // Stop - Posts a stop action to a compute instance // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *ComputeClient) stop(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, options *ComputeClientBeginStopOptions) (*http.Response, error) { req, err := client.stopCreateRequest(ctx, resourceGroupName, workspaceName, computeName, options) if err != nil { @@ -663,7 +663,7 @@ func (client *ComputeClient) stopCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -673,7 +673,7 @@ func (client *ComputeClient) stopCreateRequest(ctx context.Context, resourceGrou // operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - computeName - Name of the Azure Machine Learning compute. @@ -694,7 +694,7 @@ func (client *ComputeClient) BeginUpdate(ctx context.Context, resourceGroupName // Update - Updates properties of a compute. This call will overwrite a compute if it exists. This is a nonrecoverable operation. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *ComputeClient) update(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters ClusterUpdateParameters, options *ComputeClientBeginUpdateOptions) (*http.Response, error) { req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, computeName, parameters, options) if err != nil { @@ -734,7 +734,117 @@ func (client *ComputeClient) updateCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// UpdateCustomServices - Updates the custom services list. The list of custom services provided shall be overwritten +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - computeName - Name of the Azure Machine Learning compute. +// - customServices - New list of Custom Services. +// - options - ComputeClientUpdateCustomServicesOptions contains the optional parameters for the ComputeClient.UpdateCustomServices +// method. +func (client *ComputeClient) UpdateCustomServices(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, customServices []*CustomService, options *ComputeClientUpdateCustomServicesOptions) (ComputeClientUpdateCustomServicesResponse, error) { + req, err := client.updateCustomServicesCreateRequest(ctx, resourceGroupName, workspaceName, computeName, customServices, options) + if err != nil { + return ComputeClientUpdateCustomServicesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ComputeClientUpdateCustomServicesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ComputeClientUpdateCustomServicesResponse{}, runtime.NewResponseError(resp) + } + return ComputeClientUpdateCustomServicesResponse{}, nil +} + +// updateCustomServicesCreateRequest creates the UpdateCustomServices request. 
+func (client *ComputeClient) updateCustomServicesCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, customServices []*CustomService, options *ComputeClientUpdateCustomServicesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/customServices" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if computeName == "" { + return nil, errors.New("parameter computeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{computeName}", url.PathEscape(computeName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, customServices) +} + +// UpdateIdleShutdownSetting - Updates the idle shutdown setting of a compute instance. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - computeName - Name of the Azure Machine Learning compute. +// - parameters - The object for updating idle shutdown setting of specified ComputeInstance. +// - options - ComputeClientUpdateIdleShutdownSettingOptions contains the optional parameters for the ComputeClient.UpdateIdleShutdownSetting +// method. +func (client *ComputeClient) UpdateIdleShutdownSetting(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters IdleShutdownSetting, options *ComputeClientUpdateIdleShutdownSettingOptions) (ComputeClientUpdateIdleShutdownSettingResponse, error) { + req, err := client.updateIdleShutdownSettingCreateRequest(ctx, resourceGroupName, workspaceName, computeName, parameters, options) + if err != nil { + return ComputeClientUpdateIdleShutdownSettingResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ComputeClientUpdateIdleShutdownSettingResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ComputeClientUpdateIdleShutdownSettingResponse{}, runtime.NewResponseError(resp) + } + return ComputeClientUpdateIdleShutdownSettingResponse{}, nil +} + +// updateIdleShutdownSettingCreateRequest creates the UpdateIdleShutdownSetting request. 
+func (client *ComputeClient) updateIdleShutdownSettingCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters IdleShutdownSetting, options *ComputeClientUpdateIdleShutdownSettingOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/updateIdleShutdownSetting" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if computeName == "" { + return nil, errors.New("parameter computeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{computeName}", url.PathEscape(computeName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, parameters) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client_example_test.go deleted file mode 100644 index ef9d3e40bc79..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/compute_client_example_test.go +++ /dev/null @@ -1,1001 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
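Editor's note: the additions above introduce two new ComputeClient operations, `UpdateCustomServices` (POST .../computes/{computeName}/customServices, overwriting the custom services list) and `UpdateIdleShutdownSetting` (POST .../computes/{computeName}/updateIdleShutdownSetting). Below is a minimal sketch of how a caller might invoke them, assuming a client factory has already been created as in the examples elsewhere in this diff; resource names are placeholders, and the `CustomService` and `IdleShutdownSetting` struct fields are left unset because their models are not shown in this change.

// updateComputeSettings calls the two new ComputeClient operations.
func updateComputeSettings(ctx context.Context, clientFactory *armmachinelearning.ClientFactory) error {
	computeClient := clientFactory.NewComputeClient()

	// Overwrite the compute instance's custom services list.
	customServices := []*armmachinelearning.CustomService{
		{ /* populate per the CustomService model (not shown in this diff) */ },
	}
	if _, err := computeClient.UpdateCustomServices(ctx, "testrg123", "workspaces123", "compute123", customServices, nil); err != nil {
		return err
	}

	// Update the compute instance's idle shutdown setting.
	idleSetting := armmachinelearning.IdleShutdownSetting{
		/* populate per the IdleShutdownSetting model (not shown in this diff) */
	}
	if _, err := computeClient.UpdateIdleShutdownSetting(ctx, "testrg123", "workspaces123", "compute123", idleSetting, nil); err != nil {
		return err
	}
	return nil
}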
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/list.json -func ExampleComputeClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewComputeClient().NewListPager("testrg123", "workspaces123", &armmachinelearning.ComputeClientListOptions{Skip: nil}) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.PaginatedComputeResourcesList = armmachinelearning.PaginatedComputeResourcesList{ - // Value: []*armmachinelearning.ComputeResource{ - // { - // Properties: &armmachinelearning.AKS{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // }, - // { - // Properties: &armmachinelearning.AKS{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute1234-56826-c9b00420020b2"), - // }, - // Name: to.Ptr("compute1234"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: 
to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute1234"), - // Location: to.Ptr("eastus"), - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/get/AKSCompute.json -func ExampleComputeClient_Get_getAAksCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComputeClient().Get(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AKS{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/get/AmlCompute.json -func ExampleComputeClient_Get_getAAmlCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComputeClient().Get(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AmlCompute{ - // Properties: &armmachinelearning.AmlComputeProperties{ - // AllocationState: to.Ptr(armmachinelearning.AllocationStateResizing), - // AllocationStateTransitionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2017-09-27T22:28:08.998Z"); return t}()), - // CurrentNodeCount: to.Ptr[int32](0), - // EnableNodePublicIP: to.Ptr(true), - // IsolatedNetwork: to.Ptr(false), - // NodeStateCounts: &armmachinelearning.NodeStateCounts{ - // IdleNodeCount: to.Ptr[int32](0), - // LeavingNodeCount: to.Ptr[int32](0), - // PreemptedNodeCount: to.Ptr[int32](0), - // PreparingNodeCount: to.Ptr[int32](0), - // RunningNodeCount: to.Ptr[int32](0), - // UnusableNodeCount: to.Ptr[int32](0), - // }, - // OSType: to.Ptr(armmachinelearning.OsTypeWindows), - // RemoteLoginPortPublicAccess: to.Ptr(armmachinelearning.RemoteLoginPortPublicAccessEnabled), - // ScaleSettings: &armmachinelearning.ScaleSettings{ - // MaxNodeCount: to.Ptr[int32](1), - // MinNodeCount: to.Ptr[int32](0), - // NodeIdleTimeBeforeScaleDown: to.Ptr("PT5M"), - // }, - // Subnet: &armmachinelearning.ResourceID{ - // ID: to.Ptr("test-subnet-resource-id"), - // }, - // TargetNodeCount: to.Ptr[int32](1), - // VMPriority: to.Ptr(armmachinelearning.VMPriorityDedicated), - // VMSize: to.Ptr("STANDARD_NC6"), - // }, - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAmlCompute), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus2"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/get/KubernetesCompute.json -func ExampleComputeClient_Get_getAKubernetesCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComputeClient().Get(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.Kubernetes{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeKubernetes), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // IsAttachedCompute: to.Ptr(true), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - // Properties: &armmachinelearning.KubernetesProperties{ - // DefaultInstanceType: to.Ptr("defaultInstanceType"), - // ExtensionInstanceReleaseTrain: to.Ptr("stable"), - // InstanceTypes: map[string]*armmachinelearning.InstanceTypeSchema{ - // "defaultInstanceType": &armmachinelearning.InstanceTypeSchema{ - // Resources: &armmachinelearning.InstanceTypeSchemaResources{ - // Limits: map[string]*string{ - // "cpu": to.Ptr("1"), - // "memory": to.Ptr("4Gi"), - // "nvidia.com/gpu": nil, - // }, - // Requests: map[string]*string{ - // "cpu": to.Ptr("1"), - // "memory": to.Ptr("4Gi"), - // "nvidia.com/gpu": nil, - // }, - // }, - // }, - // }, - // Namespace: to.Ptr("default"), - // }, - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/get/ComputeInstance.json -func ExampleComputeClient_Get_getAnComputeInstance() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComputeClient().Get(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.ComputeInstance{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // Properties: &armmachinelearning.ComputeInstanceProperties{ - // ApplicationSharingPolicy: to.Ptr(armmachinelearning.ApplicationSharingPolicyShared), - // Applications: []*armmachinelearning.ComputeInstanceApplication{ - // { - // DisplayName: to.Ptr("Jupyter"), - // EndpointURI: to.Ptr("https://compute123.eastus2.azureml.net/jupyter"), - // }}, - // ComputeInstanceAuthorizationType: to.Ptr(armmachinelearning.ComputeInstanceAuthorizationTypePersonal), - // ConnectivityEndpoints: &armmachinelearning.ComputeInstanceConnectivityEndpoints{ - // PrivateIPAddress: to.Ptr("10.0.0.1"), - // PublicIPAddress: to.Ptr("10.0.0.1"), - // }, - // CreatedBy: &armmachinelearning.ComputeInstanceCreatedBy{ - // UserID: to.Ptr("00000000-0000-0000-0000-000000000000"), - // UserName: to.Ptr("foobar@microsoft.com"), - // UserOrgID: to.Ptr("00000000-0000-0000-0000-000000000000"), - // }, - // PersonalComputeInstanceSettings: &armmachinelearning.PersonalComputeInstanceSettings{ - // AssignedUser: &armmachinelearning.AssignedUser{ - // ObjectID: to.Ptr("00000000-0000-0000-0000-000000000000"), - // TenantID: to.Ptr("00000000-0000-0000-0000-000000000000"), - // }, - // }, - // SSHSettings: &armmachinelearning.ComputeInstanceSSHSettings{ - // AdminUserName: to.Ptr("azureuser"), - // SSHPort: to.Ptr[int32](22), - // SSHPublicAccess: to.Ptr(armmachinelearning.SSHPublicAccessEnabled), - // }, - // State: to.Ptr(armmachinelearning.ComputeInstanceStateRunning), - // Subnet: &armmachinelearning.ResourceID{ - // ID: to.Ptr("test-subnet-resource-id"), - // }, - // VMSize: to.Ptr("STANDARD_NC6"), - // }, - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus2"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/KubernetesCompute.json -func ExampleComputeClient_BeginCreateOrUpdate_attachAKubernetesCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.Kubernetes{ - Description: to.Ptr("some compute"), - ComputeType: to.Ptr(armmachinelearning.ComputeTypeKubernetes), - ResourceID: 
to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - Properties: &armmachinelearning.KubernetesProperties{ - DefaultInstanceType: to.Ptr("defaultInstanceType"), - InstanceTypes: map[string]*armmachinelearning.InstanceTypeSchema{ - "defaultInstanceType": { - Resources: &armmachinelearning.InstanceTypeSchemaResources{ - Limits: map[string]*string{ - "cpu": to.Ptr("1"), - "memory": to.Ptr("4Gi"), - "nvidia.com/gpu": nil, - }, - Requests: map[string]*string{ - "cpu": to.Ptr("1"), - "memory": to.Ptr("4Gi"), - "nvidia.com/gpu": nil, - }, - }, - }, - }, - Namespace: to.Ptr("default"), - }, - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.Kubernetes{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeKubernetes), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - // Properties: &armmachinelearning.KubernetesProperties{ - // DefaultInstanceType: to.Ptr("defaultInstanceType"), - // ExtensionInstanceReleaseTrain: to.Ptr("stable"), - // InstanceTypes: map[string]*armmachinelearning.InstanceTypeSchema{ - // "defaultInstanceType": &armmachinelearning.InstanceTypeSchema{ - // Resources: &armmachinelearning.InstanceTypeSchemaResources{ - // Limits: map[string]*string{ - // "cpu": to.Ptr("1"), - // "memory": to.Ptr("4Gi"), - // "nvidia.com/gpu": nil, - // }, - // Requests: map[string]*string{ - // "cpu": to.Ptr("1"), - // "memory": to.Ptr("4Gi"), - // "nvidia.com/gpu": nil, - // }, - // }, - // }, - // }, - // Namespace: to.Ptr("default"), - // }, - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/BasicAmlCompute.json -func ExampleComputeClient_BeginCreateOrUpdate_createAAmlCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", 
"workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.AmlCompute{ - Properties: &armmachinelearning.AmlComputeProperties{ - EnableNodePublicIP: to.Ptr(true), - IsolatedNetwork: to.Ptr(false), - OSType: to.Ptr(armmachinelearning.OsTypeWindows), - RemoteLoginPortPublicAccess: to.Ptr(armmachinelearning.RemoteLoginPortPublicAccessNotSpecified), - ScaleSettings: &armmachinelearning.ScaleSettings{ - MaxNodeCount: to.Ptr[int32](1), - MinNodeCount: to.Ptr[int32](0), - NodeIdleTimeBeforeScaleDown: to.Ptr("PT5M"), - }, - VirtualMachineImage: &armmachinelearning.VirtualMachineImage{ - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1"), - }, - VMPriority: to.Ptr(armmachinelearning.VMPriorityDedicated), - VMSize: to.Ptr("STANDARD_NC6"), - }, - ComputeType: to.Ptr(armmachinelearning.ComputeTypeAmlCompute), - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AmlCompute{ - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAmlCompute), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/BasicDataFactoryCompute.json -func ExampleComputeClient_BeginCreateOrUpdate_createADataFactoryCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.DataFactory{ - ComputeType: to.Ptr(armmachinelearning.ComputeTypeDataFactory), - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.DataFactory{ - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeDataFactory), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/BasicAKSCompute.json -func ExampleComputeClient_BeginCreateOrUpdate_createAnAksCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.AKS{ - ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AKS{ - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/ComputeInstance.json -func ExampleComputeClient_BeginCreateOrUpdate_createAnComputeInstanceCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.ComputeInstance{ - ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - Properties: &armmachinelearning.ComputeInstanceProperties{ - ApplicationSharingPolicy: to.Ptr(armmachinelearning.ApplicationSharingPolicyPersonal), - ComputeInstanceAuthorizationType: to.Ptr(armmachinelearning.ComputeInstanceAuthorizationTypePersonal), - PersonalComputeInstanceSettings: &armmachinelearning.PersonalComputeInstanceSettings{ - AssignedUser: &armmachinelearning.AssignedUser{ - ObjectID: to.Ptr("00000000-0000-0000-0000-000000000000"), - TenantID: to.Ptr("00000000-0000-0000-0000-000000000000"), - }, - }, - SSHSettings: &armmachinelearning.ComputeInstanceSSHSettings{ - SSHPublicAccess: to.Ptr(armmachinelearning.SSHPublicAccessDisabled), - }, - Subnet: &armmachinelearning.ResourceID{ - ID: to.Ptr("test-subnet-resource-id"), - }, - VMSize: to.Ptr("STANDARD_NC6"), - }, - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.ComputeInstance{ - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/ComputeInstanceWithSchedules.json -func ExampleComputeClient_BeginCreateOrUpdate_createAnComputeInstanceComputeWithSchedules() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.ComputeInstance{ - ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - Properties: &armmachinelearning.ComputeInstanceProperties{ - ApplicationSharingPolicy: to.Ptr(armmachinelearning.ApplicationSharingPolicyPersonal), - ComputeInstanceAuthorizationType: to.Ptr(armmachinelearning.ComputeInstanceAuthorizationTypePersonal), - PersonalComputeInstanceSettings: &armmachinelearning.PersonalComputeInstanceSettings{ - AssignedUser: &armmachinelearning.AssignedUser{ - ObjectID: to.Ptr("00000000-0000-0000-0000-000000000000"), - TenantID: to.Ptr("00000000-0000-0000-0000-000000000000"), - }, - }, - Schedules: &armmachinelearning.ComputeSchedules{ - ComputeStartStop: []*armmachinelearning.ComputeStartStopSchedule{ - { - Action: to.Ptr(armmachinelearning.ComputePowerActionStop), - Cron: &armmachinelearning.CronTrigger{ - StartTime: to.Ptr("2021-04-23T01:30:00"), - TimeZone: to.Ptr("Pacific Standard Time"), - Expression: to.Ptr("0 18 * * *"), - }, - Status: to.Ptr(armmachinelearning.ScheduleStatusEnabled), - TriggerType: to.Ptr(armmachinelearning.TriggerTypeCron), - }}, - }, - SSHSettings: &armmachinelearning.ComputeInstanceSSHSettings{ - SSHPublicAccess: to.Ptr(armmachinelearning.SSHPublicAccessDisabled), - }, - VMSize: to.Ptr("STANDARD_NC6"), - }, - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.ComputeInstance{ - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/ComputeInstanceMinimal.json -func ExampleComputeClient_BeginCreateOrUpdate_createAnComputeInstanceComputeWithMinimalInputs() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.ComputeInstance{ - ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - Properties: &armmachinelearning.ComputeInstanceProperties{ - VMSize: to.Ptr("STANDARD_NC6"), - }, - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.ComputeInstance{ - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeComputeInstance), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/AmlCompute.json -func ExampleComputeClient_BeginCreateOrUpdate_updateAAmlCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.AmlCompute{ - Properties: &armmachinelearning.AmlComputeProperties{ - ScaleSettings: &armmachinelearning.ScaleSettings{ - MaxNodeCount: to.Ptr[int32](4), - MinNodeCount: to.Ptr[int32](4), - NodeIdleTimeBeforeScaleDown: to.Ptr("PT5M"), - }, - }, - Description: to.Ptr("some compute"), - ComputeType: to.Ptr(armmachinelearning.ComputeTypeAmlCompute), - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AmlCompute{ - // Properties: &armmachinelearning.AmlComputeProperties{ - // AllocationState: to.Ptr(armmachinelearning.AllocationStateResizing), - // AllocationStateTransitionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2017-09-27T22:28:08.998Z"); return t}()), - // CurrentNodeCount: to.Ptr[int32](0), - // EnableNodePublicIP: to.Ptr(true), - // IsolatedNetwork: to.Ptr(false), - // NodeStateCounts: &armmachinelearning.NodeStateCounts{ - // IdleNodeCount: to.Ptr[int32](0), - // LeavingNodeCount: to.Ptr[int32](0), - // PreemptedNodeCount: to.Ptr[int32](0), - // PreparingNodeCount: to.Ptr[int32](0), - // RunningNodeCount: to.Ptr[int32](0), - // UnusableNodeCount: to.Ptr[int32](0), - // }, - // OSType: to.Ptr(armmachinelearning.OsTypeWindows), - // RemoteLoginPortPublicAccess: to.Ptr(armmachinelearning.RemoteLoginPortPublicAccessEnabled), - // ScaleSettings: &armmachinelearning.ScaleSettings{ - // MaxNodeCount: to.Ptr[int32](1), - // MinNodeCount: to.Ptr[int32](0), - // NodeIdleTimeBeforeScaleDown: to.Ptr("PT5M"), - // }, - // Subnet: &armmachinelearning.ResourceID{ - // ID: to.Ptr("test-subnet-resource-id"), - // }, - // TargetNodeCount: to.Ptr[int32](1), - // VMPriority: to.Ptr(armmachinelearning.VMPriorityDedicated), - // VMSize: to.Ptr("STANDARD_NC6"), - // }, - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAmlCompute), - // CreatedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ModifiedOn: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-01T22:00:00.0000000+00:00"); return t}()), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus2"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/createOrUpdate/AKSCompute.json -func ExampleComputeClient_BeginCreateOrUpdate_updateAnAksCompute() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginCreateOrUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ComputeResource{ - Properties: &armmachinelearning.AKS{ - Properties: &armmachinelearning.AKSSchemaProperties{ - AgentCount: to.Ptr[int32](4), - }, - Description: to.Ptr("some compute"), - ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - }, - Location: to.Ptr("eastus"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", 
err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AKS{ - // Properties: &armmachinelearning.AKSSchemaProperties{ - // AgentCount: to.Ptr[int32](4), - // }, - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // ResourceID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourcegroups/testrg123/providers/Microsoft.ContainerService/managedClusters/compute123-56826-c9b00420020b2"), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/patch.json -func ExampleComputeClient_BeginUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginUpdate(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.ClusterUpdateParameters{ - Properties: &armmachinelearning.ClusterUpdateProperties{ - Properties: &armmachinelearning.ScaleSettingsInformation{ - ScaleSettings: &armmachinelearning.ScaleSettings{ - MaxNodeCount: to.Ptr[int32](4), - MinNodeCount: to.Ptr[int32](4), - NodeIdleTimeBeforeScaleDown: to.Ptr("PT5M"), - }, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ComputeResource = armmachinelearning.ComputeResource{ - // Properties: &armmachinelearning.AmlCompute{ - // Description: to.Ptr("some compute"), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAmlCompute), - // ProvisioningState: to.Ptr(armmachinelearning.ProvisioningStateSucceeded), - // }, - // Name: to.Ptr("compute123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes"), - // ID: to.Ptr("/subscriptions/34adfa4f-cedf-4dc0-ba29-b6d1a69ab345/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspaces123/computes/compute123"), - // Location: to.Ptr("eastus2"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/delete.json -func ExampleComputeClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginDelete(ctx, "testrg123", "workspaces123", "compute123", armmachinelearning.UnderlyingResourceActionDelete, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/listNodes.json -func ExampleComputeClient_NewListNodesPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewComputeClient().NewListNodesPager("testrg123", "workspaces123", "compute123", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Nodes { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.AmlComputeNodesInformation = armmachinelearning.AmlComputeNodesInformation{ - // Nodes: []*armmachinelearning.AmlComputeNodeInformation{ - // { - // NodeID: to.Ptr("tvm-3601533753_1-20170719t162906z"), - // NodeState: to.Ptr(armmachinelearning.NodeStateRunning), - // Port: to.Ptr[int32](50000), - // PrivateIPAddress: to.Ptr("13.84.190.124"), - // PublicIPAddress: to.Ptr("13.84.190.134"), - // RunID: to.Ptr("2f378a44-38f2-443a-9f0d-9909d0b47890"), - // }, - // { - // NodeID: to.Ptr("tvm-3601533753_2-20170719t162906z"), - // NodeState: to.Ptr(armmachinelearning.NodeStateIdle), - // Port: to.Ptr[int32](50001), - // PrivateIPAddress: to.Ptr("13.84.190.124"), - // PublicIPAddress: to.Ptr("13.84.190.134"), - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/listKeys.json -func ExampleComputeClient_ListKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewComputeClient().ListKeys(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res = armmachinelearning.ComputeClientListKeysResponse{ - // ComputeSecretsClassification: &armmachinelearning.AksComputeSecrets{ - // AdminKubeConfig: to.Ptr("admin kube config..."), - // ImagePullSecretName: to.Ptr("the image pull secret name"), - // UserKubeConfig: to.Ptr("user kube config..."), - // ComputeType: to.Ptr(armmachinelearning.ComputeTypeAKS), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/start.json -func ExampleComputeClient_BeginStart() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginStart(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/stop.json -func ExampleComputeClient_BeginStop() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginStop(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Compute/restart.json -func ExampleComputeClient_BeginRestart() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewComputeClient().BeginRestart(ctx, "testrg123", "workspaces123", "compute123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/constants.go b/sdk/resourcemanager/machinelearning/armmachinelearning/constants.go index 61c80364cc8a..6ef662386fcd 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/constants.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/constants.go @@ -11,7 +11,7 @@ package 
armmachinelearning const ( moduleName = "armmachinelearning" - moduleVersion = "v3.1.1" + moduleVersion = "v4.0.0-beta.1" ) // AllocationState - Allocation state of the compute. Possible values are: steady - Indicates that the compute is not resizing. @@ -52,6 +52,45 @@ func PossibleApplicationSharingPolicyValues() []ApplicationSharingPolicy { } } +// AssetProvisioningState - Provisioning state of registry asset. +type AssetProvisioningState string + +const ( + AssetProvisioningStateCanceled AssetProvisioningState = "Canceled" + AssetProvisioningStateCreating AssetProvisioningState = "Creating" + AssetProvisioningStateDeleting AssetProvisioningState = "Deleting" + AssetProvisioningStateFailed AssetProvisioningState = "Failed" + AssetProvisioningStateSucceeded AssetProvisioningState = "Succeeded" + AssetProvisioningStateUpdating AssetProvisioningState = "Updating" +) + +// PossibleAssetProvisioningStateValues returns the possible values for the AssetProvisioningState const type. +func PossibleAssetProvisioningStateValues() []AssetProvisioningState { + return []AssetProvisioningState{ + AssetProvisioningStateCanceled, + AssetProvisioningStateCreating, + AssetProvisioningStateDeleting, + AssetProvisioningStateFailed, + AssetProvisioningStateSucceeded, + AssetProvisioningStateUpdating, + } +} + +type AutoDeleteCondition string + +const ( + AutoDeleteConditionCreatedGreaterThan AutoDeleteCondition = "CreatedGreaterThan" + AutoDeleteConditionLastAccessedGreaterThan AutoDeleteCondition = "LastAccessedGreaterThan" +) + +// PossibleAutoDeleteConditionValues returns the possible values for the AutoDeleteCondition const type. +func PossibleAutoDeleteConditionValues() []AutoDeleteCondition { + return []AutoDeleteCondition{ + AutoDeleteConditionCreatedGreaterThan, + AutoDeleteConditionLastAccessedGreaterThan, + } +} + // AutoRebuildSetting - AutoRebuild setting for the derived image type AutoRebuildSetting string @@ -86,6 +125,36 @@ func PossibleAutosaveValues() []Autosave { } } +// BaseEnvironmentSourceType - Base environment type. +type BaseEnvironmentSourceType string + +const ( + BaseEnvironmentSourceTypeEnvironmentAsset BaseEnvironmentSourceType = "EnvironmentAsset" +) + +// PossibleBaseEnvironmentSourceTypeValues returns the possible values for the BaseEnvironmentSourceType const type. +func PossibleBaseEnvironmentSourceTypeValues() []BaseEnvironmentSourceType { + return []BaseEnvironmentSourceType{ + BaseEnvironmentSourceTypeEnvironmentAsset, + } +} + +// BatchDeploymentConfigurationType - The enumerated property types for batch deployments. +type BatchDeploymentConfigurationType string + +const ( + BatchDeploymentConfigurationTypeModel BatchDeploymentConfigurationType = "Model" + BatchDeploymentConfigurationTypePipelineComponent BatchDeploymentConfigurationType = "PipelineComponent" +) + +// PossibleBatchDeploymentConfigurationTypeValues returns the possible values for the BatchDeploymentConfigurationType const type. +func PossibleBatchDeploymentConfigurationTypeValues() []BatchDeploymentConfigurationType { + return []BatchDeploymentConfigurationType{ + BatchDeploymentConfigurationTypeModel, + BatchDeploymentConfigurationTypePipelineComponent, + } +} + // BatchLoggingLevel - Log verbosity for batch inferencing. Increasing verbosity order for logging is : Warning, Info and // Debug. The default value is Info. 
type BatchLoggingLevel string @@ -199,6 +268,86 @@ func PossibleCachingValues() []Caching { } } +type CategoricalDataDriftMetric string + +const ( + // CategoricalDataDriftMetricJensenShannonDistance - The Jensen Shannon Distance (JSD) metric. + CategoricalDataDriftMetricJensenShannonDistance CategoricalDataDriftMetric = "JensenShannonDistance" + // CategoricalDataDriftMetricPearsonsChiSquaredTest - The Pearsons Chi Squared Test metric. + CategoricalDataDriftMetricPearsonsChiSquaredTest CategoricalDataDriftMetric = "PearsonsChiSquaredTest" + // CategoricalDataDriftMetricPopulationStabilityIndex - The Population Stability Index (PSI) metric. + CategoricalDataDriftMetricPopulationStabilityIndex CategoricalDataDriftMetric = "PopulationStabilityIndex" +) + +// PossibleCategoricalDataDriftMetricValues returns the possible values for the CategoricalDataDriftMetric const type. +func PossibleCategoricalDataDriftMetricValues() []CategoricalDataDriftMetric { + return []CategoricalDataDriftMetric{ + CategoricalDataDriftMetricJensenShannonDistance, + CategoricalDataDriftMetricPearsonsChiSquaredTest, + CategoricalDataDriftMetricPopulationStabilityIndex, + } +} + +type CategoricalDataQualityMetric string + +const ( + // CategoricalDataQualityMetricDataTypeErrorRate - Calculates the rate of data type errors. + CategoricalDataQualityMetricDataTypeErrorRate CategoricalDataQualityMetric = "DataTypeErrorRate" + // CategoricalDataQualityMetricNullValueRate - Calculates the rate of null values. + CategoricalDataQualityMetricNullValueRate CategoricalDataQualityMetric = "NullValueRate" + // CategoricalDataQualityMetricOutOfBoundsRate - Calculates the rate at which values are out of bounds. + CategoricalDataQualityMetricOutOfBoundsRate CategoricalDataQualityMetric = "OutOfBoundsRate" +) + +// PossibleCategoricalDataQualityMetricValues returns the possible values for the CategoricalDataQualityMetric const type. +func PossibleCategoricalDataQualityMetricValues() []CategoricalDataQualityMetric { + return []CategoricalDataQualityMetric{ + CategoricalDataQualityMetricDataTypeErrorRate, + CategoricalDataQualityMetricNullValueRate, + CategoricalDataQualityMetricOutOfBoundsRate, + } +} + +type CategoricalPredictionDriftMetric string + +const ( + // CategoricalPredictionDriftMetricJensenShannonDistance - The Jensen Shannon Distance (JSD) metric. + CategoricalPredictionDriftMetricJensenShannonDistance CategoricalPredictionDriftMetric = "JensenShannonDistance" + // CategoricalPredictionDriftMetricPearsonsChiSquaredTest - The Pearsons Chi Squared Test metric. + CategoricalPredictionDriftMetricPearsonsChiSquaredTest CategoricalPredictionDriftMetric = "PearsonsChiSquaredTest" + // CategoricalPredictionDriftMetricPopulationStabilityIndex - The Population Stability Index (PSI) metric. + CategoricalPredictionDriftMetricPopulationStabilityIndex CategoricalPredictionDriftMetric = "PopulationStabilityIndex" +) + +// PossibleCategoricalPredictionDriftMetricValues returns the possible values for the CategoricalPredictionDriftMetric const type. +func PossibleCategoricalPredictionDriftMetricValues() []CategoricalPredictionDriftMetric { + return []CategoricalPredictionDriftMetric{ + CategoricalPredictionDriftMetricJensenShannonDistance, + CategoricalPredictionDriftMetricPearsonsChiSquaredTest, + CategoricalPredictionDriftMetricPopulationStabilityIndex, + } +} + +type ClassificationModelPerformanceMetric string + +const ( + // ClassificationModelPerformanceMetricAccuracy - Calculates the accuracy of the model predictions.
+ ClassificationModelPerformanceMetricAccuracy ClassificationModelPerformanceMetric = "Accuracy" + // ClassificationModelPerformanceMetricPrecision - Calculates the precision of the model predictions. + ClassificationModelPerformanceMetricPrecision ClassificationModelPerformanceMetric = "Precision" + // ClassificationModelPerformanceMetricRecall - Calculates the recall of the model predictions. + ClassificationModelPerformanceMetricRecall ClassificationModelPerformanceMetric = "Recall" +) + +// PossibleClassificationModelPerformanceMetricValues returns the possible values for the ClassificationModelPerformanceMetric const type. +func PossibleClassificationModelPerformanceMetricValues() []ClassificationModelPerformanceMetric { + return []ClassificationModelPerformanceMetric{ + ClassificationModelPerformanceMetricAccuracy, + ClassificationModelPerformanceMetricPrecision, + ClassificationModelPerformanceMetricRecall, + } +} + // ClassificationModels - Enum for all classification models supported by AutoML. type ClassificationModels string @@ -239,7 +388,7 @@ const ( // tf-idf may also work. ClassificationModelsMultinomialNaiveBayes ClassificationModels = "MultinomialNaiveBayes" // ClassificationModelsRandomForest - Random forest is a supervised learning algorithm. - // The "forest" it builds, is an ensemble of decision trees, usually trained with the “bagging” method. + // The "forest" it builds, is an ensemble of decision trees, usually trained with the bagging method. // The general idea of the bagging method is that a combination of learning models increases the overall result. ClassificationModelsRandomForest ClassificationModels = "RandomForest" // ClassificationModelsSGD - SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning @@ -420,7 +569,7 @@ func PossibleComputeInstanceStateValues() []ComputeInstanceState { } } -// ComputePowerAction - The compute power action. +// ComputePowerAction - [Required] The compute power action. type ComputePowerAction string const ( @@ -472,20 +621,28 @@ func PossibleComputeTypeValues() []ComputeType { type ConnectionAuthType string const ( + ConnectionAuthTypeAPIKey ConnectionAuthType = "ApiKey" + ConnectionAuthTypeAccessKey ConnectionAuthType = "AccessKey" + ConnectionAuthTypeCustomKeys ConnectionAuthType = "CustomKeys" ConnectionAuthTypeManagedIdentity ConnectionAuthType = "ManagedIdentity" ConnectionAuthTypeNone ConnectionAuthType = "None" ConnectionAuthTypePAT ConnectionAuthType = "PAT" ConnectionAuthTypeSAS ConnectionAuthType = "SAS" + ConnectionAuthTypeServicePrincipal ConnectionAuthType = "ServicePrincipal" ConnectionAuthTypeUsernamePassword ConnectionAuthType = "UsernamePassword" ) // PossibleConnectionAuthTypeValues returns the possible values for the ConnectionAuthType const type. 
func PossibleConnectionAuthTypeValues() []ConnectionAuthType { return []ConnectionAuthType{ + ConnectionAuthTypeAPIKey, + ConnectionAuthTypeAccessKey, + ConnectionAuthTypeCustomKeys, ConnectionAuthTypeManagedIdentity, ConnectionAuthTypeNone, ConnectionAuthTypePAT, ConnectionAuthTypeSAS, + ConnectionAuthTypeServicePrincipal, ConnectionAuthTypeUsernamePassword, } } @@ -494,24 +651,55 @@ func PossibleConnectionAuthTypeValues() []ConnectionAuthType { type ConnectionCategory string const ( - ConnectionCategoryContainerRegistry ConnectionCategory = "ContainerRegistry" - ConnectionCategoryGit ConnectionCategory = "Git" - ConnectionCategoryPythonFeed ConnectionCategory = "PythonFeed" + ConnectionCategoryADLSGen2 ConnectionCategory = "ADLSGen2" + ConnectionCategoryAPIKey ConnectionCategory = "ApiKey" + ConnectionCategoryAzureMySQLDb ConnectionCategory = "AzureMySqlDb" + ConnectionCategoryAzureOpenAI ConnectionCategory = "AzureOpenAI" + ConnectionCategoryAzurePostgresDb ConnectionCategory = "AzurePostgresDb" + ConnectionCategoryAzureSQLDb ConnectionCategory = "AzureSqlDb" + ConnectionCategoryAzureSynapseAnalytics ConnectionCategory = "AzureSynapseAnalytics" + ConnectionCategoryCognitiveSearch ConnectionCategory = "CognitiveSearch" + ConnectionCategoryCognitiveService ConnectionCategory = "CognitiveService" + ConnectionCategoryContainerRegistry ConnectionCategory = "ContainerRegistry" + ConnectionCategoryCustomKeys ConnectionCategory = "CustomKeys" + ConnectionCategoryGit ConnectionCategory = "Git" + ConnectionCategoryPythonFeed ConnectionCategory = "PythonFeed" + ConnectionCategoryRedis ConnectionCategory = "Redis" + ConnectionCategoryS3 ConnectionCategory = "S3" + ConnectionCategorySnowflake ConnectionCategory = "Snowflake" ) // PossibleConnectionCategoryValues returns the possible values for the ConnectionCategory const type. func PossibleConnectionCategoryValues() []ConnectionCategory { return []ConnectionCategory{ + ConnectionCategoryADLSGen2, + ConnectionCategoryAPIKey, + ConnectionCategoryAzureMySQLDb, + ConnectionCategoryAzureOpenAI, + ConnectionCategoryAzurePostgresDb, + ConnectionCategoryAzureSQLDb, + ConnectionCategoryAzureSynapseAnalytics, + ConnectionCategoryCognitiveSearch, + ConnectionCategoryCognitiveService, ConnectionCategoryContainerRegistry, + ConnectionCategoryCustomKeys, ConnectionCategoryGit, ConnectionCategoryPythonFeed, + ConnectionCategoryRedis, + ConnectionCategoryS3, + ConnectionCategorySnowflake, } } +// ContainerType - The type of container to retrieve logs from. type ContainerType string const ( - ContainerTypeInferenceServer ContainerType = "InferenceServer" + // ContainerTypeInferenceServer - The container used to serve the user's requests. + ContainerTypeInferenceServer ContainerType = "InferenceServer" + // ContainerTypeModelDataCollector - The container used to collect payload and custom logging when MDC (model data collection) is enabled. + ContainerTypeModelDataCollector ContainerType = "ModelDataCollector" + // ContainerTypeStorageInitializer - The container used to download models and the scoring script.
ContainerTypeStorageInitializer ContainerType = "StorageInitializer" ) @@ -519,6 +707,7 @@ const ( func PossibleContainerTypeValues() []ContainerType { return []ContainerType{ ContainerTypeInferenceServer, + ContainerTypeModelDataCollector, ContainerTypeStorageInitializer, } } @@ -549,6 +738,8 @@ type CredentialsType string const ( CredentialsTypeAccountKey CredentialsType = "AccountKey" CredentialsTypeCertificate CredentialsType = "Certificate" + CredentialsTypeKerberosKeytab CredentialsType = "KerberosKeytab" + CredentialsTypeKerberosPassword CredentialsType = "KerberosPassword" CredentialsTypeNone CredentialsType = "None" CredentialsTypeSas CredentialsType = "Sas" CredentialsTypeServicePrincipal CredentialsType = "ServicePrincipal" @@ -559,12 +750,45 @@ func PossibleCredentialsTypeValues() []CredentialsType { return []CredentialsType{ CredentialsTypeAccountKey, CredentialsTypeCertificate, + CredentialsTypeKerberosKeytab, + CredentialsTypeKerberosPassword, CredentialsTypeNone, CredentialsTypeSas, CredentialsTypeServicePrincipal, } } +type DataCollectionMode string + +const ( + DataCollectionModeDisabled DataCollectionMode = "Disabled" + DataCollectionModeEnabled DataCollectionMode = "Enabled" +) + +// PossibleDataCollectionModeValues returns the possible values for the DataCollectionMode const type. +func PossibleDataCollectionModeValues() []DataCollectionMode { + return []DataCollectionMode{ + DataCollectionModeDisabled, + DataCollectionModeEnabled, + } +} + +// DataImportSourceType - Enum to determine the type of data. +type DataImportSourceType string + +const ( + DataImportSourceTypeDatabase DataImportSourceType = "database" + DataImportSourceTypeFileSystem DataImportSourceType = "file_system" +) + +// PossibleDataImportSourceTypeValues returns the possible values for the DataImportSourceType const type. +func PossibleDataImportSourceTypeValues() []DataImportSourceType { + return []DataImportSourceType{ + DataImportSourceTypeDatabase, + DataImportSourceTypeFileSystem, + } +} + // DataType - Enum to determine the type of data. type DataType string @@ -591,6 +815,8 @@ const ( DatastoreTypeAzureDataLakeGen1 DatastoreType = "AzureDataLakeGen1" DatastoreTypeAzureDataLakeGen2 DatastoreType = "AzureDataLakeGen2" DatastoreTypeAzureFile DatastoreType = "AzureFile" + DatastoreTypeHdfs DatastoreType = "Hdfs" + DatastoreTypeOneLake DatastoreType = "OneLake" ) // PossibleDatastoreTypeValues returns the possible values for the DatastoreType const type. @@ -600,6 +826,8 @@ func PossibleDatastoreTypeValues() []DatastoreType { DatastoreTypeAzureDataLakeGen1, DatastoreTypeAzureDataLakeGen2, DatastoreTypeAzureFile, + DatastoreTypeHdfs, + DatastoreTypeOneLake, } } @@ -653,6 +881,7 @@ type DistributionType string const ( DistributionTypeMpi DistributionType = "Mpi" DistributionTypePyTorch DistributionType = "PyTorch" + DistributionTypeRay DistributionType = "Ray" DistributionTypeTensorFlow DistributionType = "TensorFlow" ) @@ -661,6 +890,7 @@ func PossibleDistributionTypeValues() []DistributionType { return []DistributionType{ DistributionTypeMpi, DistributionTypePyTorch, + DistributionTypeRay, DistributionTypeTensorFlow, } } @@ -698,6 +928,24 @@ func PossibleEgressPublicNetworkAccessTypeValues() []EgressPublicNetworkAccessTy } } +// EmailNotificationEnableType - Enum to determine the email notification type. 
+type EmailNotificationEnableType string + +const ( + EmailNotificationEnableTypeJobCancelled EmailNotificationEnableType = "JobCancelled" + EmailNotificationEnableTypeJobCompleted EmailNotificationEnableType = "JobCompleted" + EmailNotificationEnableTypeJobFailed EmailNotificationEnableType = "JobFailed" +) + +// PossibleEmailNotificationEnableTypeValues returns the possible values for the EmailNotificationEnableType const type. +func PossibleEmailNotificationEnableTypeValues() []EmailNotificationEnableType { + return []EmailNotificationEnableType{ + EmailNotificationEnableTypeJobCancelled, + EmailNotificationEnableTypeJobCompleted, + EmailNotificationEnableTypeJobFailed, + } +} + // EncryptionStatus - Indicates whether or not the encryption is enabled for the workspace. type EncryptionStatus string @@ -774,6 +1022,28 @@ func PossibleEndpointProvisioningStateValues() []EndpointProvisioningState { } } +// EndpointServiceConnectionStatus - Connection status of the service consumer with the service provider +type EndpointServiceConnectionStatus string + +const ( + EndpointServiceConnectionStatusApproved EndpointServiceConnectionStatus = "Approved" + EndpointServiceConnectionStatusDisconnected EndpointServiceConnectionStatus = "Disconnected" + EndpointServiceConnectionStatusPending EndpointServiceConnectionStatus = "Pending" + EndpointServiceConnectionStatusRejected EndpointServiceConnectionStatus = "Rejected" + EndpointServiceConnectionStatusTimeout EndpointServiceConnectionStatus = "Timeout" +) + +// PossibleEndpointServiceConnectionStatusValues returns the possible values for the EndpointServiceConnectionStatus const type. +func PossibleEndpointServiceConnectionStatusValues() []EndpointServiceConnectionStatus { + return []EndpointServiceConnectionStatus{ + EndpointServiceConnectionStatusApproved, + EndpointServiceConnectionStatusDisconnected, + EndpointServiceConnectionStatusPending, + EndpointServiceConnectionStatusRejected, + EndpointServiceConnectionStatusTimeout, + } +} + // EnvironmentType - Environment type is either user created or curated by Azure ML service type EnvironmentType string @@ -790,6 +1060,79 @@ func PossibleEnvironmentTypeValues() []EnvironmentType { } } +// EnvironmentVariableType - Type of the Environment Variable. Possible values are: local - For local variable +type EnvironmentVariableType string + +const ( + EnvironmentVariableTypeLocal EnvironmentVariableType = "local" +) + +// PossibleEnvironmentVariableTypeValues returns the possible values for the EnvironmentVariableType const type. +func PossibleEnvironmentVariableTypeValues() []EnvironmentVariableType { + return []EnvironmentVariableType{ + EnvironmentVariableTypeLocal, + } +} + +// ExportFormatType - The format of exported labels. +type ExportFormatType string + +const ( + ExportFormatTypeCSV ExportFormatType = "CSV" + ExportFormatTypeCoco ExportFormatType = "Coco" + ExportFormatTypeDataset ExportFormatType = "Dataset" +) + +// PossibleExportFormatTypeValues returns the possible values for the ExportFormatType const type. +func PossibleExportFormatTypeValues() []ExportFormatType { + return []ExportFormatType{ + ExportFormatTypeCSV, + ExportFormatTypeCoco, + ExportFormatTypeDataset, + } +} + +type FeatureAttributionMetric string + +const ( + // FeatureAttributionMetricNormalizedDiscountedCumulativeGain - The Normalized Discounted Cumulative Gain metric. 
+ FeatureAttributionMetricNormalizedDiscountedCumulativeGain FeatureAttributionMetric = "NormalizedDiscountedCumulativeGain" +) + +// PossibleFeatureAttributionMetricValues returns the possible values for the FeatureAttributionMetric const type. +func PossibleFeatureAttributionMetricValues() []FeatureAttributionMetric { + return []FeatureAttributionMetric{ + FeatureAttributionMetricNormalizedDiscountedCumulativeGain, + } +} + +type FeatureDataType string + +const ( + FeatureDataTypeBinary FeatureDataType = "Binary" + FeatureDataTypeBoolean FeatureDataType = "Boolean" + FeatureDataTypeDatetime FeatureDataType = "Datetime" + FeatureDataTypeDouble FeatureDataType = "Double" + FeatureDataTypeFloat FeatureDataType = "Float" + FeatureDataTypeInteger FeatureDataType = "Integer" + FeatureDataTypeLong FeatureDataType = "Long" + FeatureDataTypeString FeatureDataType = "String" +) + +// PossibleFeatureDataTypeValues returns the possible values for the FeatureDataType const type. +func PossibleFeatureDataTypeValues() []FeatureDataType { + return []FeatureDataType{ + FeatureDataTypeBinary, + FeatureDataTypeBoolean, + FeatureDataTypeDatetime, + FeatureDataTypeDouble, + FeatureDataTypeFloat, + FeatureDataTypeInteger, + FeatureDataTypeLong, + FeatureDataTypeString, + } +} + // FeatureLags - Flag for generating lags for the numeric features. type FeatureLags string @@ -808,6 +1151,21 @@ func PossibleFeatureLagsValues() []FeatureLags { } } +type FeaturestoreJobType string + +const ( + FeaturestoreJobTypeBackfillMaterialization FeaturestoreJobType = "BackfillMaterialization" + FeaturestoreJobTypeRecurrentMaterialization FeaturestoreJobType = "RecurrentMaterialization" +) + +// PossibleFeaturestoreJobTypeValues returns the possible values for the FeaturestoreJobType const type. +func PossibleFeaturestoreJobTypeValues() []FeaturestoreJobType { + return []FeaturestoreJobType{ + FeaturestoreJobTypeBackfillMaterialization, + FeaturestoreJobTypeRecurrentMaterialization, + } +} + // FeaturizationMode - Featurization mode - determines data featurization mode. type FeaturizationMode string @@ -898,7 +1256,7 @@ const ( // to missing data and shifts in the trend, and typically handles outliers well. ForecastingModelsProphet ForecastingModels = "Prophet" // ForecastingModelsRandomForest - Random forest is a supervised learning algorithm. - // The "forest" it builds, is an ensemble of decision trees, usually trained with the “bagging” method. + // The "forest" it builds, is an ensemble of decision trees, usually trained with the bagging method. // The general idea of the bagging method is that a combination of learning models increases the overall result. ForecastingModelsRandomForest ForecastingModels = "RandomForest" // ForecastingModelsSGD - SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications @@ -972,6 +1330,54 @@ func PossibleForecastingPrimaryMetricsValues() []ForecastingPrimaryMetrics { } } +// GenerationSafetyQualityMetric - Generation safety quality metric enum. 
+type GenerationSafetyQualityMetric string + +const ( + GenerationSafetyQualityMetricAcceptableCoherenceScorePerInstance GenerationSafetyQualityMetric = "AcceptableCoherenceScorePerInstance" + GenerationSafetyQualityMetricAcceptableFluencyScorePerInstance GenerationSafetyQualityMetric = "AcceptableFluencyScorePerInstance" + GenerationSafetyQualityMetricAcceptableGroundednessScorePerInstance GenerationSafetyQualityMetric = "AcceptableGroundednessScorePerInstance" + GenerationSafetyQualityMetricAcceptableRelevanceScorePerInstance GenerationSafetyQualityMetric = "AcceptableRelevanceScorePerInstance" + GenerationSafetyQualityMetricAcceptableSimilarityScorePerInstance GenerationSafetyQualityMetric = "AcceptableSimilarityScorePerInstance" + GenerationSafetyQualityMetricAggregatedCoherencePassRate GenerationSafetyQualityMetric = "AggregatedCoherencePassRate" + GenerationSafetyQualityMetricAggregatedFluencyPassRate GenerationSafetyQualityMetric = "AggregatedFluencyPassRate" + GenerationSafetyQualityMetricAggregatedGroundednessPassRate GenerationSafetyQualityMetric = "AggregatedGroundednessPassRate" + GenerationSafetyQualityMetricAggregatedRelevancePassRate GenerationSafetyQualityMetric = "AggregatedRelevancePassRate" + GenerationSafetyQualityMetricAggregatedSimilarityPassRate GenerationSafetyQualityMetric = "AggregatedSimilarityPassRate" +) + +// PossibleGenerationSafetyQualityMetricValues returns the possible values for the GenerationSafetyQualityMetric const type. +func PossibleGenerationSafetyQualityMetricValues() []GenerationSafetyQualityMetric { + return []GenerationSafetyQualityMetric{ + GenerationSafetyQualityMetricAcceptableCoherenceScorePerInstance, + GenerationSafetyQualityMetricAcceptableFluencyScorePerInstance, + GenerationSafetyQualityMetricAcceptableGroundednessScorePerInstance, + GenerationSafetyQualityMetricAcceptableRelevanceScorePerInstance, + GenerationSafetyQualityMetricAcceptableSimilarityScorePerInstance, + GenerationSafetyQualityMetricAggregatedCoherencePassRate, + GenerationSafetyQualityMetricAggregatedFluencyPassRate, + GenerationSafetyQualityMetricAggregatedGroundednessPassRate, + GenerationSafetyQualityMetricAggregatedRelevancePassRate, + GenerationSafetyQualityMetricAggregatedSimilarityPassRate, + } +} + +// GenerationTokenStatisticsMetric - Generation token statistics metric enum. +type GenerationTokenStatisticsMetric string + +const ( + GenerationTokenStatisticsMetricTotalTokenCount GenerationTokenStatisticsMetric = "TotalTokenCount" + GenerationTokenStatisticsMetricTotalTokenCountPerGroup GenerationTokenStatisticsMetric = "TotalTokenCountPerGroup" +) + +// PossibleGenerationTokenStatisticsMetricValues returns the possible values for the GenerationTokenStatisticsMetric const type. +func PossibleGenerationTokenStatisticsMetricValues() []GenerationTokenStatisticsMetric { + return []GenerationTokenStatisticsMetric{ + GenerationTokenStatisticsMetricTotalTokenCount, + GenerationTokenStatisticsMetricTotalTokenCountPerGroup, + } +} + // Goal - Defines supported metric goals for hyperparameter tuning type Goal string @@ -1006,6 +1412,76 @@ func PossibleIdentityConfigurationTypeValues() []IdentityConfigurationType { } } +// ImageAnnotationType - Annotation type of image data. 
+type ImageAnnotationType string + +const ( + ImageAnnotationTypeBoundingBox ImageAnnotationType = "BoundingBox" + ImageAnnotationTypeClassification ImageAnnotationType = "Classification" + ImageAnnotationTypeInstanceSegmentation ImageAnnotationType = "InstanceSegmentation" +) + +// PossibleImageAnnotationTypeValues returns the possible values for the ImageAnnotationType const type. +func PossibleImageAnnotationTypeValues() []ImageAnnotationType { + return []ImageAnnotationType{ + ImageAnnotationTypeBoundingBox, + ImageAnnotationTypeClassification, + ImageAnnotationTypeInstanceSegmentation, + } +} + +// ImageType - Type of the image. Possible values are: docker - For docker images. azureml - For AzureML images +type ImageType string + +const ( + ImageTypeAzureml ImageType = "azureml" + ImageTypeDocker ImageType = "docker" +) + +// PossibleImageTypeValues returns the possible values for the ImageType const type. +func PossibleImageTypeValues() []ImageType { + return []ImageType{ + ImageTypeAzureml, + ImageTypeDocker, + } +} + +// IncrementalDataRefresh - Whether IncrementalDataRefresh is enabled +type IncrementalDataRefresh string + +const ( + IncrementalDataRefreshDisabled IncrementalDataRefresh = "Disabled" + IncrementalDataRefreshEnabled IncrementalDataRefresh = "Enabled" +) + +// PossibleIncrementalDataRefreshValues returns the possible values for the IncrementalDataRefresh const type. +func PossibleIncrementalDataRefreshValues() []IncrementalDataRefresh { + return []IncrementalDataRefresh{ + IncrementalDataRefreshDisabled, + IncrementalDataRefreshEnabled, + } +} + +// InferencingServerType - Inferencing server type for various targets. +type InferencingServerType string + +const ( + InferencingServerTypeAzureMLBatch InferencingServerType = "AzureMLBatch" + InferencingServerTypeAzureMLOnline InferencingServerType = "AzureMLOnline" + InferencingServerTypeCustom InferencingServerType = "Custom" + InferencingServerTypeTriton InferencingServerType = "Triton" +) + +// PossibleInferencingServerTypeValues returns the possible values for the InferencingServerType const type. +func PossibleInferencingServerTypeValues() []InferencingServerType { + return []InferencingServerType{ + InferencingServerTypeAzureMLBatch, + InferencingServerTypeAzureMLOnline, + InferencingServerTypeCustom, + InferencingServerTypeTriton, + } +} + // InputDeliveryMode - Enum to determine the input data delivery mode. type InputDeliveryMode string @@ -1030,6 +1506,24 @@ func PossibleInputDeliveryModeValues() []InputDeliveryMode { } } +// InputPathType - Input path type for package inputs. +type InputPathType string + +const ( + InputPathTypePathID InputPathType = "PathId" + InputPathTypePathVersion InputPathType = "PathVersion" + InputPathTypeURL InputPathType = "Url" +) + +// PossibleInputPathTypeValues returns the possible values for the InputPathType const type. +func PossibleInputPathTypeValues() []InputPathType { + return []InputPathType{ + InputPathTypePathID, + InputPathTypePathVersion, + InputPathTypeURL, + } +} + // InstanceSegmentationPrimaryMetrics - Primary metrics for InstanceSegmentation tasks. type InstanceSegmentationPrimaryMetrics string @@ -1046,6 +1540,24 @@ func PossibleInstanceSegmentationPrimaryMetricsValues() []InstanceSegmentationPr } } +// IsolationMode - Isolation mode for the managed network of a machine learning workspace. 
+type IsolationMode string + +const ( + IsolationModeAllowInternetOutbound IsolationMode = "AllowInternetOutbound" + IsolationModeAllowOnlyApprovedOutbound IsolationMode = "AllowOnlyApprovedOutbound" + IsolationModeDisabled IsolationMode = "Disabled" +) + +// PossibleIsolationModeValues returns the possible values for the IsolationMode const type. +func PossibleIsolationModeValues() []IsolationMode { + return []IsolationMode{ + IsolationModeAllowInternetOutbound, + IsolationModeAllowOnlyApprovedOutbound, + IsolationModeDisabled, + } +} + // JobInputType - Enum to determine the Job Input Type. type JobInputType string @@ -1111,6 +1623,26 @@ func PossibleJobOutputTypeValues() []JobOutputType { } } +// JobProvisioningState - Enum to determine the job provisioning state. +type JobProvisioningState string + +const ( + JobProvisioningStateCanceled JobProvisioningState = "Canceled" + JobProvisioningStateFailed JobProvisioningState = "Failed" + JobProvisioningStateInProgress JobProvisioningState = "InProgress" + JobProvisioningStateSucceeded JobProvisioningState = "Succeeded" +) + +// PossibleJobProvisioningStateValues returns the possible values for the JobProvisioningState const type. +func PossibleJobProvisioningStateValues() []JobProvisioningState { + return []JobProvisioningState{ + JobProvisioningStateCanceled, + JobProvisioningStateFailed, + JobProvisioningStateInProgress, + JobProvisioningStateSucceeded, + } +} + // JobStatus - The status of a job. type JobStatus string @@ -1144,6 +1676,8 @@ const ( JobStatusQueued JobStatus = "Queued" // JobStatusRunning - The job started to run in the compute target. JobStatusRunning JobStatus = "Running" + // JobStatusScheduled - The job is in a scheduled state. Job is not in any active state. + JobStatusScheduled JobStatus = "Scheduled" // JobStatusStarting - Run has started. The user has a run ID. JobStatusStarting JobStatus = "Starting" // JobStatusUnknown - Default job status if not mapped to all other statuses @@ -1165,18 +1699,43 @@ func PossibleJobStatusValues() []JobStatus { JobStatusProvisioning, JobStatusQueued, JobStatusRunning, + JobStatusScheduled, JobStatusStarting, JobStatusUnknown, } } +// JobTier - Enum to determine the job tier. +type JobTier string + +const ( + JobTierBasic JobTier = "Basic" + JobTierNull JobTier = "Null" + JobTierPremium JobTier = "Premium" + JobTierSpot JobTier = "Spot" + JobTierStandard JobTier = "Standard" +) + +// PossibleJobTierValues returns the possible values for the JobTier const type. +func PossibleJobTierValues() []JobTier { + return []JobTier{ + JobTierBasic, + JobTierNull, + JobTierPremium, + JobTierSpot, + JobTierStandard, + } +} + // JobType - Enum to determine the type of job. type JobType string const ( JobTypeAutoML JobType = "AutoML" JobTypeCommand JobType = "Command" + JobTypeLabeling JobType = "Labeling" JobTypePipeline JobType = "Pipeline" + JobTypeSpark JobType = "Spark" JobTypeSweep JobType = "Sweep" ) @@ -1185,7 +1744,9 @@ func PossibleJobTypeValues() []JobType { return []JobType{ JobTypeAutoML, JobTypeCommand, + JobTypeLabeling, JobTypePipeline, + JobTypeSpark, JobTypeSweep, } } @@ -1259,6 +1820,40 @@ func PossibleLoadBalancerTypeValues() []LoadBalancerType { } } +type LogTrainingMetrics string + +const ( + // LogTrainingMetricsDisable - Disable compute and log training metrics. + LogTrainingMetricsDisable LogTrainingMetrics = "Disable" + // LogTrainingMetricsEnable - Enable compute and log training metrics. 
+ LogTrainingMetricsEnable LogTrainingMetrics = "Enable" +) + +// PossibleLogTrainingMetricsValues returns the possible values for the LogTrainingMetrics const type. +func PossibleLogTrainingMetricsValues() []LogTrainingMetrics { + return []LogTrainingMetrics{ + LogTrainingMetricsDisable, + LogTrainingMetricsEnable, + } +} + +type LogValidationLoss string + +const ( + // LogValidationLossDisable - Disable compute and log validation metrics. + LogValidationLossDisable LogValidationLoss = "Disable" + // LogValidationLossEnable - Enable compute and log validation metrics. + LogValidationLossEnable LogValidationLoss = "Enable" +) + +// PossibleLogValidationLossValues returns the possible values for the LogValidationLoss const type. +func PossibleLogValidationLossValues() []LogValidationLoss { + return []LogValidationLoss{ + LogValidationLossDisable, + LogValidationLossEnable, + } +} + // LogVerbosity - Enum for setting log verbosity. type LogVerbosity string @@ -1289,6 +1884,53 @@ func PossibleLogVerbosityValues() []LogVerbosity { } } +type MLAssistConfigurationType string + +const ( + MLAssistConfigurationTypeDisabled MLAssistConfigurationType = "Disabled" + MLAssistConfigurationTypeEnabled MLAssistConfigurationType = "Enabled" +) + +// PossibleMLAssistConfigurationTypeValues returns the possible values for the MLAssistConfigurationType const type. +func PossibleMLAssistConfigurationTypeValues() []MLAssistConfigurationType { + return []MLAssistConfigurationType{ + MLAssistConfigurationTypeDisabled, + MLAssistConfigurationTypeEnabled, + } +} + +// MLFlowAutologgerState - Enum to determine the state of mlflow autologger. +type MLFlowAutologgerState string + +const ( + MLFlowAutologgerStateDisabled MLFlowAutologgerState = "Disabled" + MLFlowAutologgerStateEnabled MLFlowAutologgerState = "Enabled" +) + +// PossibleMLFlowAutologgerStateValues returns the possible values for the MLFlowAutologgerState const type. +func PossibleMLFlowAutologgerStateValues() []MLFlowAutologgerState { + return []MLFlowAutologgerState{ + MLFlowAutologgerStateDisabled, + MLFlowAutologgerStateEnabled, + } +} + +// ManagedNetworkStatus - Status for the managed network of a machine learning workspace. +type ManagedNetworkStatus string + +const ( + ManagedNetworkStatusActive ManagedNetworkStatus = "Active" + ManagedNetworkStatusInactive ManagedNetworkStatus = "Inactive" +) + +// PossibleManagedNetworkStatusValues returns the possible values for the ManagedNetworkStatus const type. +func PossibleManagedNetworkStatusValues() []ManagedNetworkStatus { + return []ManagedNetworkStatus{ + ManagedNetworkStatusActive, + ManagedNetworkStatusInactive, + } +} + // ManagedServiceIdentityType - Type of managed service identity (where both SystemAssigned and UserAssigned types are allowed). type ManagedServiceIdentityType string @@ -1309,6 +1951,57 @@ func PossibleManagedServiceIdentityTypeValues() []ManagedServiceIdentityType { } } +type MaterializationStoreType string + +const ( + MaterializationStoreTypeNone MaterializationStoreType = "None" + MaterializationStoreTypeOffline MaterializationStoreType = "Offline" + MaterializationStoreTypeOnline MaterializationStoreType = "Online" + MaterializationStoreTypeOnlineAndOffline MaterializationStoreType = "OnlineAndOffline" +) + +// PossibleMaterializationStoreTypeValues returns the possible values for the MaterializationStoreType const type. 
+func PossibleMaterializationStoreTypeValues() []MaterializationStoreType { + return []MaterializationStoreType{ + MaterializationStoreTypeNone, + MaterializationStoreTypeOffline, + MaterializationStoreTypeOnline, + MaterializationStoreTypeOnlineAndOffline, + } +} + +// MediaType - Media type of data asset. +type MediaType string + +const ( + MediaTypeImage MediaType = "Image" + MediaTypeText MediaType = "Text" +) + +// PossibleMediaTypeValues returns the possible values for the MediaType const type. +func PossibleMediaTypeValues() []MediaType { + return []MediaType{ + MediaTypeImage, + MediaTypeText, + } +} + +// MlflowAutologger - Indicates whether mlflow autologger is enabled for notebooks. +type MlflowAutologger string + +const ( + MlflowAutologgerDisabled MlflowAutologger = "Disabled" + MlflowAutologgerEnabled MlflowAutologger = "Enabled" +) + +// PossibleMlflowAutologgerValues returns the possible values for the MlflowAutologger const type. +func PossibleMlflowAutologgerValues() []MlflowAutologger { + return []MlflowAutologger{ + MlflowAutologgerDisabled, + MlflowAutologgerEnabled, + } +} + // ModelSize - Image model size. type ModelSize string @@ -1336,6 +2029,204 @@ func PossibleModelSizeValues() []ModelSize { } } +// ModelTaskType - Model task type enum. +type ModelTaskType string + +const ( + ModelTaskTypeClassification ModelTaskType = "Classification" + ModelTaskTypeQuestionAnswering ModelTaskType = "QuestionAnswering" + ModelTaskTypeRegression ModelTaskType = "Regression" +) + +// PossibleModelTaskTypeValues returns the possible values for the ModelTaskType const type. +func PossibleModelTaskTypeValues() []ModelTaskType { + return []ModelTaskType{ + ModelTaskTypeClassification, + ModelTaskTypeQuestionAnswering, + ModelTaskTypeRegression, + } +} + +// MonitorComputeIdentityType - Monitor compute identity type enum. +type MonitorComputeIdentityType string + +const ( + // MonitorComputeIdentityTypeAmlToken - Authenticates through user's AML token. + MonitorComputeIdentityTypeAmlToken MonitorComputeIdentityType = "AmlToken" + // MonitorComputeIdentityTypeManagedIdentity - Authenticates through a user-provided managed identity. + MonitorComputeIdentityTypeManagedIdentity MonitorComputeIdentityType = "ManagedIdentity" +) + +// PossibleMonitorComputeIdentityTypeValues returns the possible values for the MonitorComputeIdentityType const type. +func PossibleMonitorComputeIdentityTypeValues() []MonitorComputeIdentityType { + return []MonitorComputeIdentityType{ + MonitorComputeIdentityTypeAmlToken, + MonitorComputeIdentityTypeManagedIdentity, + } +} + +// MonitorComputeType - Monitor compute type enum. +type MonitorComputeType string + +const ( + // MonitorComputeTypeServerlessSpark - Serverless Spark compute. + MonitorComputeTypeServerlessSpark MonitorComputeType = "ServerlessSpark" +) + +// PossibleMonitorComputeTypeValues returns the possible values for the MonitorComputeType const type. +func PossibleMonitorComputeTypeValues() []MonitorComputeType { + return []MonitorComputeType{ + MonitorComputeTypeServerlessSpark, + } +} + +type MonitoringAlertNotificationType string + +const ( + // MonitoringAlertNotificationTypeAzureMonitor - Settings for Azure Monitor based alerting. + MonitoringAlertNotificationTypeAzureMonitor MonitoringAlertNotificationType = "AzureMonitor" + // MonitoringAlertNotificationTypeEmail - Settings for AML email notifications. 
+ MonitoringAlertNotificationTypeEmail MonitoringAlertNotificationType = "Email"
+)
+
+// PossibleMonitoringAlertNotificationTypeValues returns the possible values for the MonitoringAlertNotificationType const type.
+func PossibleMonitoringAlertNotificationTypeValues() []MonitoringAlertNotificationType {
+ return []MonitoringAlertNotificationType{
+ MonitoringAlertNotificationTypeAzureMonitor,
+ MonitoringAlertNotificationTypeEmail,
+ }
+}
+
+type MonitoringFeatureDataType string
+
+const (
+ // MonitoringFeatureDataTypeCategorical - Used for features of categorical data type.
+ MonitoringFeatureDataTypeCategorical MonitoringFeatureDataType = "Categorical"
+ // MonitoringFeatureDataTypeNumerical - Used for features of numerical data type.
+ MonitoringFeatureDataTypeNumerical MonitoringFeatureDataType = "Numerical"
+)
+
+// PossibleMonitoringFeatureDataTypeValues returns the possible values for the MonitoringFeatureDataType const type.
+func PossibleMonitoringFeatureDataTypeValues() []MonitoringFeatureDataType {
+ return []MonitoringFeatureDataType{
+ MonitoringFeatureDataTypeCategorical,
+ MonitoringFeatureDataTypeNumerical,
+ }
+}
+
+type MonitoringFeatureFilterType string
+
+const (
+ // MonitoringFeatureFilterTypeAllFeatures - Includes all features.
+ MonitoringFeatureFilterTypeAllFeatures MonitoringFeatureFilterType = "AllFeatures"
+ // MonitoringFeatureFilterTypeFeatureSubset - Includes a user-defined subset of features.
+ MonitoringFeatureFilterTypeFeatureSubset MonitoringFeatureFilterType = "FeatureSubset"
+ // MonitoringFeatureFilterTypeTopNByAttribution - Only includes the top contributing features, measured by feature attribution.
+ MonitoringFeatureFilterTypeTopNByAttribution MonitoringFeatureFilterType = "TopNByAttribution"
+)
+
+// PossibleMonitoringFeatureFilterTypeValues returns the possible values for the MonitoringFeatureFilterType const type.
+func PossibleMonitoringFeatureFilterTypeValues() []MonitoringFeatureFilterType {
+ return []MonitoringFeatureFilterType{
+ MonitoringFeatureFilterTypeAllFeatures,
+ MonitoringFeatureFilterTypeFeatureSubset,
+ MonitoringFeatureFilterTypeTopNByAttribution,
+ }
+}
+
+// MonitoringInputDataType - Monitoring input data type enum.
+type MonitoringInputDataType string
+
+const (
+ // MonitoringInputDataTypeFixed - An input data with tabular format which doesn't require preprocessing.
+ MonitoringInputDataTypeFixed MonitoringInputDataType = "Fixed"
+ // MonitoringInputDataTypeStatic - An input data with a fixed window size.
+ MonitoringInputDataTypeStatic MonitoringInputDataType = "Static"
+ // MonitoringInputDataTypeTrailing - An input data with a window that trails relative to the monitor's current run.
+ MonitoringInputDataTypeTrailing MonitoringInputDataType = "Trailing"
+)
+
+// PossibleMonitoringInputDataTypeValues returns the possible values for the MonitoringInputDataType const type.
+func PossibleMonitoringInputDataTypeValues() []MonitoringInputDataType {
+ return []MonitoringInputDataType{
+ MonitoringInputDataTypeFixed,
+ MonitoringInputDataTypeStatic,
+ MonitoringInputDataTypeTrailing,
+ }
+}
+
+type MonitoringModelType string
+
+const (
+ // MonitoringModelTypeClassification - A model trained for classification tasks.
+ MonitoringModelTypeClassification MonitoringModelType = "Classification"
+ // MonitoringModelTypeRegression - A model trained for regression tasks.
+ MonitoringModelTypeRegression MonitoringModelType = "Regression" +) + +// PossibleMonitoringModelTypeValues returns the possible values for the MonitoringModelType const type. +func PossibleMonitoringModelTypeValues() []MonitoringModelType { + return []MonitoringModelType{ + MonitoringModelTypeClassification, + MonitoringModelTypeRegression, + } +} + +type MonitoringNotificationMode string + +const ( + // MonitoringNotificationModeDisabled - Disabled notifications will not produce emails/metrics leveraged for alerting. + MonitoringNotificationModeDisabled MonitoringNotificationMode = "Disabled" + // MonitoringNotificationModeEnabled - Enabled notification will produce emails/metrics leveraged for alerting. + MonitoringNotificationModeEnabled MonitoringNotificationMode = "Enabled" +) + +// PossibleMonitoringNotificationModeValues returns the possible values for the MonitoringNotificationMode const type. +func PossibleMonitoringNotificationModeValues() []MonitoringNotificationMode { + return []MonitoringNotificationMode{ + MonitoringNotificationModeDisabled, + MonitoringNotificationModeEnabled, + } +} + +type MonitoringSignalType string + +const ( + // MonitoringSignalTypeCustom - Tracks a custom signal provided by users. + MonitoringSignalTypeCustom MonitoringSignalType = "Custom" + // MonitoringSignalTypeDataDrift - Tracks model input data distribution change, comparing against training data or past production + // data. + MonitoringSignalTypeDataDrift MonitoringSignalType = "DataDrift" + // MonitoringSignalTypeDataQuality - Tracks model input data integrity. + MonitoringSignalTypeDataQuality MonitoringSignalType = "DataQuality" + // MonitoringSignalTypeFeatureAttributionDrift - Tracks feature importance change in production, comparing against feature + // importance at training time. + MonitoringSignalTypeFeatureAttributionDrift MonitoringSignalType = "FeatureAttributionDrift" + // MonitoringSignalTypeGenerationSafetyQuality - Tracks the safety and quality of generated content. + MonitoringSignalTypeGenerationSafetyQuality MonitoringSignalType = "GenerationSafetyQuality" + // MonitoringSignalTypeGenerationTokenStatistics - Tracks the token usage of generative endpoints. + MonitoringSignalTypeGenerationTokenStatistics MonitoringSignalType = "GenerationTokenStatistics" + // MonitoringSignalTypeModelPerformance - Tracks model performance based on ground truth data. + MonitoringSignalTypeModelPerformance MonitoringSignalType = "ModelPerformance" + // MonitoringSignalTypePredictionDrift - Tracks prediction result data distribution change, comparing against validation/test + // label data or past production data. + MonitoringSignalTypePredictionDrift MonitoringSignalType = "PredictionDrift" +) + +// PossibleMonitoringSignalTypeValues returns the possible values for the MonitoringSignalType const type. +func PossibleMonitoringSignalTypeValues() []MonitoringSignalType { + return []MonitoringSignalType{ + MonitoringSignalTypeCustom, + MonitoringSignalTypeDataDrift, + MonitoringSignalTypeDataQuality, + MonitoringSignalTypeFeatureAttributionDrift, + MonitoringSignalTypeGenerationSafetyQuality, + MonitoringSignalTypeGenerationTokenStatistics, + MonitoringSignalTypeModelPerformance, + MonitoringSignalTypePredictionDrift, + } +} + // MountAction - Mount Action. 
type MountAction string @@ -1376,6 +2267,22 @@ func PossibleMountStateValues() []MountState { } } +// MultiSelect - Whether multiSelect is enabled +type MultiSelect string + +const ( + MultiSelectDisabled MultiSelect = "Disabled" + MultiSelectEnabled MultiSelect = "Enabled" +) + +// PossibleMultiSelectValues returns the possible values for the MultiSelect const type. +func PossibleMultiSelectValues() []MultiSelect { + return []MultiSelect{ + MultiSelectDisabled, + MultiSelectEnabled, + } +} + // NCrossValidationsMode - Determines how N-Cross validations value is determined. type NCrossValidationsMode string @@ -1411,6 +2318,39 @@ func PossibleNetworkValues() []Network { } } +// NlpLearningRateScheduler - Enum of learning rate schedulers that aligns with those supported by HF +type NlpLearningRateScheduler string + +const ( + // NlpLearningRateSchedulerConstant - Constant learning rate. + NlpLearningRateSchedulerConstant NlpLearningRateScheduler = "Constant" + // NlpLearningRateSchedulerConstantWithWarmup - Linear warmup followed by constant value. + NlpLearningRateSchedulerConstantWithWarmup NlpLearningRateScheduler = "ConstantWithWarmup" + // NlpLearningRateSchedulerCosine - Linear warmup then cosine decay. + NlpLearningRateSchedulerCosine NlpLearningRateScheduler = "Cosine" + // NlpLearningRateSchedulerCosineWithRestarts - Linear warmup, cosine decay, then restart to initial LR. + NlpLearningRateSchedulerCosineWithRestarts NlpLearningRateScheduler = "CosineWithRestarts" + // NlpLearningRateSchedulerLinear - Linear warmup and decay. + NlpLearningRateSchedulerLinear NlpLearningRateScheduler = "Linear" + // NlpLearningRateSchedulerNone - No learning rate schedule. + NlpLearningRateSchedulerNone NlpLearningRateScheduler = "None" + // NlpLearningRateSchedulerPolynomial - Increase linearly then polynomially decay. + NlpLearningRateSchedulerPolynomial NlpLearningRateScheduler = "Polynomial" +) + +// PossibleNlpLearningRateSchedulerValues returns the possible values for the NlpLearningRateScheduler const type. +func PossibleNlpLearningRateSchedulerValues() []NlpLearningRateScheduler { + return []NlpLearningRateScheduler{ + NlpLearningRateSchedulerConstant, + NlpLearningRateSchedulerConstantWithWarmup, + NlpLearningRateSchedulerCosine, + NlpLearningRateSchedulerCosineWithRestarts, + NlpLearningRateSchedulerLinear, + NlpLearningRateSchedulerNone, + NlpLearningRateSchedulerPolynomial, + } +} + // NodeState - State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted. type NodeState string @@ -1435,6 +2375,89 @@ func PossibleNodeStateValues() []NodeState { } } +// NodesValueType - The enumerated types for the nodes value +type NodesValueType string + +const ( + NodesValueTypeAll NodesValueType = "All" + NodesValueTypeCustom NodesValueType = "Custom" +) + +// PossibleNodesValueTypeValues returns the possible values for the NodesValueType const type. +func PossibleNodesValueTypeValues() []NodesValueType { + return []NodesValueType{ + NodesValueTypeAll, + NodesValueTypeCustom, + } +} + +type NumericalDataDriftMetric string + +const ( + // NumericalDataDriftMetricJensenShannonDistance - The Jensen Shannon Distance (JSD) metric. + NumericalDataDriftMetricJensenShannonDistance NumericalDataDriftMetric = "JensenShannonDistance" + // NumericalDataDriftMetricNormalizedWassersteinDistance - The Normalized Wasserstein Distance metric. 
+ NumericalDataDriftMetricNormalizedWassersteinDistance NumericalDataDriftMetric = "NormalizedWassersteinDistance" + // NumericalDataDriftMetricPopulationStabilityIndex - The Population Stability Index (PSI) metric. + NumericalDataDriftMetricPopulationStabilityIndex NumericalDataDriftMetric = "PopulationStabilityIndex" + // NumericalDataDriftMetricTwoSampleKolmogorovSmirnovTest - The Two Sample Kolmogorov-Smirnov Test (two-sample K–S) metric. + NumericalDataDriftMetricTwoSampleKolmogorovSmirnovTest NumericalDataDriftMetric = "TwoSampleKolmogorovSmirnovTest" +) + +// PossibleNumericalDataDriftMetricValues returns the possible values for the NumericalDataDriftMetric const type. +func PossibleNumericalDataDriftMetricValues() []NumericalDataDriftMetric { + return []NumericalDataDriftMetric{ + NumericalDataDriftMetricJensenShannonDistance, + NumericalDataDriftMetricNormalizedWassersteinDistance, + NumericalDataDriftMetricPopulationStabilityIndex, + NumericalDataDriftMetricTwoSampleKolmogorovSmirnovTest, + } +} + +type NumericalDataQualityMetric string + +const ( + // NumericalDataQualityMetricDataTypeErrorRate - Calculates the rate of data type errors. + NumericalDataQualityMetricDataTypeErrorRate NumericalDataQualityMetric = "DataTypeErrorRate" + // NumericalDataQualityMetricNullValueRate - Calculates the rate of null values. + NumericalDataQualityMetricNullValueRate NumericalDataQualityMetric = "NullValueRate" + // NumericalDataQualityMetricOutOfBoundsRate - Calculates the rate values are out of bounds. + NumericalDataQualityMetricOutOfBoundsRate NumericalDataQualityMetric = "OutOfBoundsRate" +) + +// PossibleNumericalDataQualityMetricValues returns the possible values for the NumericalDataQualityMetric const type. +func PossibleNumericalDataQualityMetricValues() []NumericalDataQualityMetric { + return []NumericalDataQualityMetric{ + NumericalDataQualityMetricDataTypeErrorRate, + NumericalDataQualityMetricNullValueRate, + NumericalDataQualityMetricOutOfBoundsRate, + } +} + +type NumericalPredictionDriftMetric string + +const ( + // NumericalPredictionDriftMetricJensenShannonDistance - The Jensen Shannon Distance (JSD) metric. + NumericalPredictionDriftMetricJensenShannonDistance NumericalPredictionDriftMetric = "JensenShannonDistance" + // NumericalPredictionDriftMetricNormalizedWassersteinDistance - The Normalized Wasserstein Distance metric. + NumericalPredictionDriftMetricNormalizedWassersteinDistance NumericalPredictionDriftMetric = "NormalizedWassersteinDistance" + // NumericalPredictionDriftMetricPopulationStabilityIndex - The Population Stability Index (PSI) metric. + NumericalPredictionDriftMetricPopulationStabilityIndex NumericalPredictionDriftMetric = "PopulationStabilityIndex" + // NumericalPredictionDriftMetricTwoSampleKolmogorovSmirnovTest - The Two Sample Kolmogorov-Smirnov Test (two-sample K–S) + // metric. + NumericalPredictionDriftMetricTwoSampleKolmogorovSmirnovTest NumericalPredictionDriftMetric = "TwoSampleKolmogorovSmirnovTest" +) + +// PossibleNumericalPredictionDriftMetricValues returns the possible values for the NumericalPredictionDriftMetric const type. 
+func PossibleNumericalPredictionDriftMetricValues() []NumericalPredictionDriftMetric { + return []NumericalPredictionDriftMetric{ + NumericalPredictionDriftMetricJensenShannonDistance, + NumericalPredictionDriftMetricNormalizedWassersteinDistance, + NumericalPredictionDriftMetricPopulationStabilityIndex, + NumericalPredictionDriftMetricTwoSampleKolmogorovSmirnovTest, + } +} + // ObjectDetectionPrimaryMetrics - Primary metrics for Image ObjectDetection task. type ObjectDetectionPrimaryMetrics string @@ -1451,6 +2474,20 @@ func PossibleObjectDetectionPrimaryMetricsValues() []ObjectDetectionPrimaryMetri } } +// OneLakeArtifactType - Enum to determine OneLake artifact type. +type OneLakeArtifactType string + +const ( + OneLakeArtifactTypeLakeHouse OneLakeArtifactType = "LakeHouse" +) + +// PossibleOneLakeArtifactTypeValues returns the possible values for the OneLakeArtifactType const type. +func PossibleOneLakeArtifactTypeValues() []OneLakeArtifactType { + return []OneLakeArtifactType{ + OneLakeArtifactTypeLakeHouse, + } +} + // OperatingSystemType - The type of operating system. type OperatingSystemType string @@ -1576,6 +2613,7 @@ func PossibleOsTypeValues() []OsType { type OutputDeliveryMode string const ( + OutputDeliveryModeDirect OutputDeliveryMode = "Direct" OutputDeliveryModeReadWriteMount OutputDeliveryMode = "ReadWriteMount" OutputDeliveryModeUpload OutputDeliveryMode = "Upload" ) @@ -1583,11 +2621,94 @@ const ( // PossibleOutputDeliveryModeValues returns the possible values for the OutputDeliveryMode const type. func PossibleOutputDeliveryModeValues() []OutputDeliveryMode { return []OutputDeliveryMode{ + OutputDeliveryModeDirect, OutputDeliveryModeReadWriteMount, OutputDeliveryModeUpload, } } +// PackageBuildState - Package build state returned in package response. +type PackageBuildState string + +const ( + PackageBuildStateFailed PackageBuildState = "Failed" + PackageBuildStateNotStarted PackageBuildState = "NotStarted" + PackageBuildStateRunning PackageBuildState = "Running" + PackageBuildStateSucceeded PackageBuildState = "Succeeded" +) + +// PossiblePackageBuildStateValues returns the possible values for the PackageBuildState const type. +func PossiblePackageBuildStateValues() []PackageBuildState { + return []PackageBuildState{ + PackageBuildStateFailed, + PackageBuildStateNotStarted, + PackageBuildStateRunning, + PackageBuildStateSucceeded, + } +} + +// PackageInputDeliveryMode - Mounting type of the model or the inputs +type PackageInputDeliveryMode string + +const ( + PackageInputDeliveryModeCopy PackageInputDeliveryMode = "Copy" + PackageInputDeliveryModeDownload PackageInputDeliveryMode = "Download" +) + +// PossiblePackageInputDeliveryModeValues returns the possible values for the PackageInputDeliveryMode const type. +func PossiblePackageInputDeliveryModeValues() []PackageInputDeliveryMode { + return []PackageInputDeliveryMode{ + PackageInputDeliveryModeCopy, + PackageInputDeliveryModeDownload, + } +} + +// PackageInputType - Type of the inputs. +type PackageInputType string + +const ( + PackageInputTypeURIFile PackageInputType = "UriFile" + PackageInputTypeURIFolder PackageInputType = "UriFolder" +) + +// PossiblePackageInputTypeValues returns the possible values for the PackageInputType const type. +func PossiblePackageInputTypeValues() []PackageInputType { + return []PackageInputType{ + PackageInputTypeURIFile, + PackageInputTypeURIFolder, + } +} + +// PendingUploadCredentialType - Enum to determine the PendingUpload credentials type. 
+type PendingUploadCredentialType string + +const ( + PendingUploadCredentialTypeSAS PendingUploadCredentialType = "SAS" +) + +// PossiblePendingUploadCredentialTypeValues returns the possible values for the PendingUploadCredentialType const type. +func PossiblePendingUploadCredentialTypeValues() []PendingUploadCredentialType { + return []PendingUploadCredentialType{ + PendingUploadCredentialTypeSAS, + } +} + +// PendingUploadType - Type of storage to use for the pending upload location +type PendingUploadType string + +const ( + PendingUploadTypeNone PendingUploadType = "None" + PendingUploadTypeTemporaryBlobReference PendingUploadType = "TemporaryBlobReference" +) + +// PossiblePendingUploadTypeValues returns the possible values for the PendingUploadType const type. +func PossiblePendingUploadTypeValues() []PendingUploadType { + return []PendingUploadType{ + PendingUploadTypeNone, + PendingUploadTypeTemporaryBlobReference, + } +} + // PrivateEndpointConnectionProvisioningState - The current provisioning state. type PrivateEndpointConnectionProvisioningState string @@ -1608,30 +2729,44 @@ func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpoin } } -// PrivateEndpointServiceConnectionStatus - The private endpoint connection status. -type PrivateEndpointServiceConnectionStatus string +// ProtectionLevel - Protection level associated with the Intellectual Property. +type ProtectionLevel string const ( - PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved" - PrivateEndpointServiceConnectionStatusDisconnected PrivateEndpointServiceConnectionStatus = "Disconnected" - PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending" - PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected" - PrivateEndpointServiceConnectionStatusTimeout PrivateEndpointServiceConnectionStatus = "Timeout" + // ProtectionLevelAll - All means Intellectual Property is fully protected. + ProtectionLevelAll ProtectionLevel = "All" + // ProtectionLevelNone - None means it is not an Intellectual Property. + ProtectionLevelNone ProtectionLevel = "None" ) -// PossiblePrivateEndpointServiceConnectionStatusValues returns the possible values for the PrivateEndpointServiceConnectionStatus const type. -func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus { - return []PrivateEndpointServiceConnectionStatus{ - PrivateEndpointServiceConnectionStatusApproved, - PrivateEndpointServiceConnectionStatusDisconnected, - PrivateEndpointServiceConnectionStatusPending, - PrivateEndpointServiceConnectionStatusRejected, - PrivateEndpointServiceConnectionStatusTimeout, +// PossibleProtectionLevelValues returns the possible values for the ProtectionLevel const type. +func PossibleProtectionLevelValues() []ProtectionLevel { + return []ProtectionLevel{ + ProtectionLevelAll, + ProtectionLevelNone, } } -// ProvisioningState - The current deployment state of workspace resource. The provisioningState is to indicate states for -// resource provisioning. +// Protocol - Protocol over which communication will happen over this endpoint +type Protocol string + +const ( + ProtocolHTTP Protocol = "http" + ProtocolTCP Protocol = "tcp" + ProtocolUDP Protocol = "udp" +) + +// PossibleProtocolValues returns the possible values for the Protocol const type. 
+func PossibleProtocolValues() []Protocol { + return []Protocol{ + ProtocolHTTP, + ProtocolTCP, + ProtocolUDP, + } +} + +// ProvisioningState - The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and +// Failed. type ProvisioningState string const ( @@ -1675,22 +2810,6 @@ func PossibleProvisioningStatusValues() []ProvisioningStatus { } } -// PublicNetworkAccess - Whether requests from Public Network are allowed. -type PublicNetworkAccess string - -const ( - PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" - PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" -) - -// PossiblePublicNetworkAccessValues returns the possible values for the PublicNetworkAccess const type. -func PossiblePublicNetworkAccessValues() []PublicNetworkAccess { - return []PublicNetworkAccess{ - PublicNetworkAccessDisabled, - PublicNetworkAccessEnabled, - } -} - // PublicNetworkAccessType - Enum to determine whether PublicNetworkAccess is Enabled or Disabled. type PublicNetworkAccessType string @@ -1782,6 +2901,26 @@ func PossibleReferenceTypeValues() []ReferenceType { } } +type RegressionModelPerformanceMetric string + +const ( + // RegressionModelPerformanceMetricMeanAbsoluteError - The Mean Absolute Error (MAE) metric. + RegressionModelPerformanceMetricMeanAbsoluteError RegressionModelPerformanceMetric = "MeanAbsoluteError" + // RegressionModelPerformanceMetricMeanSquaredError - The Mean Squared Error (MSE) metric. + RegressionModelPerformanceMetricMeanSquaredError RegressionModelPerformanceMetric = "MeanSquaredError" + // RegressionModelPerformanceMetricRootMeanSquaredError - The Root Mean Squared Error (RMSE) metric. + RegressionModelPerformanceMetricRootMeanSquaredError RegressionModelPerformanceMetric = "RootMeanSquaredError" +) + +// PossibleRegressionModelPerformanceMetricValues returns the possible values for the RegressionModelPerformanceMetric const type. +func PossibleRegressionModelPerformanceMetricValues() []RegressionModelPerformanceMetric { + return []RegressionModelPerformanceMetric{ + RegressionModelPerformanceMetricMeanAbsoluteError, + RegressionModelPerformanceMetricMeanSquaredError, + RegressionModelPerformanceMetricRootMeanSquaredError, + } +} + // RegressionModels - Enum for all Regression models supported by AutoML. type RegressionModels string @@ -1810,7 +2949,7 @@ const ( // RegressionModelsLightGBM - LightGBM is a gradient boosting framework that uses tree based learning algorithms. RegressionModelsLightGBM RegressionModels = "LightGBM" // RegressionModelsRandomForest - Random forest is a supervised learning algorithm. - // The "forest" it builds, is an ensemble of decision trees, usually trained with the “bagging” method. + // The "forest" it builds, is an ensemble of decision trees, usually trained with the bagging method. // The general idea of the bagging method is that a combination of learning models increases the overall result. 
RegressionModelsRandomForest RegressionModels = "RandomForest" // RegressionModelsSGD - SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications @@ -1888,6 +3027,95 @@ func PossibleRemoteLoginPortPublicAccessValues() []RemoteLoginPortPublicAccess { } } +type RollingRateType string + +const ( + RollingRateTypeDay RollingRateType = "Day" + RollingRateTypeHour RollingRateType = "Hour" + RollingRateTypeMinute RollingRateType = "Minute" + RollingRateTypeMonth RollingRateType = "Month" + RollingRateTypeYear RollingRateType = "Year" +) + +// PossibleRollingRateTypeValues returns the possible values for the RollingRateType const type. +func PossibleRollingRateTypeValues() []RollingRateType { + return []RollingRateType{ + RollingRateTypeDay, + RollingRateTypeHour, + RollingRateTypeMinute, + RollingRateTypeMonth, + RollingRateTypeYear, + } +} + +// RuleAction - The action enum for networking rule. +type RuleAction string + +const ( + RuleActionAllow RuleAction = "Allow" + RuleActionDeny RuleAction = "Deny" +) + +// PossibleRuleActionValues returns the possible values for the RuleAction const type. +func PossibleRuleActionValues() []RuleAction { + return []RuleAction{ + RuleActionAllow, + RuleActionDeny, + } +} + +// RuleCategory - Category of a managed network Outbound Rule of a machine learning workspace. +type RuleCategory string + +const ( + RuleCategoryRecommended RuleCategory = "Recommended" + RuleCategoryRequired RuleCategory = "Required" + RuleCategoryUserDefined RuleCategory = "UserDefined" +) + +// PossibleRuleCategoryValues returns the possible values for the RuleCategory const type. +func PossibleRuleCategoryValues() []RuleCategory { + return []RuleCategory{ + RuleCategoryRecommended, + RuleCategoryRequired, + RuleCategoryUserDefined, + } +} + +// RuleStatus - Type of a managed network Outbound Rule of a machine learning workspace. +type RuleStatus string + +const ( + RuleStatusActive RuleStatus = "Active" + RuleStatusInactive RuleStatus = "Inactive" +) + +// PossibleRuleStatusValues returns the possible values for the RuleStatus const type. +func PossibleRuleStatusValues() []RuleStatus { + return []RuleStatus{ + RuleStatusActive, + RuleStatusInactive, + } +} + +// RuleType - Type of a managed network Outbound Rule of a machine learning workspace. +type RuleType string + +const ( + RuleTypeFQDN RuleType = "FQDN" + RuleTypePrivateEndpoint RuleType = "PrivateEndpoint" + RuleTypeServiceTag RuleType = "ServiceTag" +) + +// PossibleRuleTypeValues returns the possible values for the RuleType const type. +func PossibleRuleTypeValues() []RuleType { + return []RuleType{ + RuleTypeFQDN, + RuleTypePrivateEndpoint, + RuleTypeServiceTag, + } +} + // SKUScaleType - Node scaling setting for the compute sku. 
type SKUScaleType string @@ -2002,6 +3230,8 @@ type ScheduleActionType string const ( ScheduleActionTypeCreateJob ScheduleActionType = "CreateJob" + ScheduleActionTypeCreateMonitor ScheduleActionType = "CreateMonitor" + ScheduleActionTypeImportData ScheduleActionType = "ImportData" ScheduleActionTypeInvokeBatchEndpoint ScheduleActionType = "InvokeBatchEndpoint" ) @@ -2009,6 +3239,8 @@ const ( func PossibleScheduleActionTypeValues() []ScheduleActionType { return []ScheduleActionType{ ScheduleActionTypeCreateJob, + ScheduleActionTypeCreateMonitor, + ScheduleActionTypeImportData, ScheduleActionTypeInvokeBatchEndpoint, } } @@ -2111,6 +3343,8 @@ type SecretsType string const ( SecretsTypeAccountKey SecretsType = "AccountKey" SecretsTypeCertificate SecretsType = "Certificate" + SecretsTypeKerberosKeytab SecretsType = "KerberosKeytab" + SecretsTypeKerberosPassword SecretsType = "KerberosPassword" SecretsTypeSas SecretsType = "Sas" SecretsTypeServicePrincipal SecretsType = "ServicePrincipal" ) @@ -2120,6 +3354,8 @@ func PossibleSecretsTypeValues() []SecretsType { return []SecretsType{ SecretsTypeAccountKey, SecretsTypeCertificate, + SecretsTypeKerberosKeytab, + SecretsTypeKerberosPassword, SecretsTypeSas, SecretsTypeServicePrincipal, } @@ -2190,6 +3426,21 @@ func PossibleSourceTypeValues() []SourceType { } } +type SparkJobEntryType string + +const ( + SparkJobEntryTypeSparkJobPythonEntry SparkJobEntryType = "SparkJobPythonEntry" + SparkJobEntryTypeSparkJobScalaEntry SparkJobEntryType = "SparkJobScalaEntry" +) + +// PossibleSparkJobEntryTypeValues returns the possible values for the SparkJobEntryType const type. +func PossibleSparkJobEntryTypeValues() []SparkJobEntryType { + return []SparkJobEntryType{ + SparkJobEntryTypeSparkJobPythonEntry, + SparkJobEntryTypeSparkJobScalaEntry, + } +} + // StackMetaLearnerType - The meta-learner is a model trained on the output of the individual heterogeneous models. Default // meta-learners are LogisticRegression for classification tasks (or LogisticRegressionCV if // cross-validation is enabled) and ElasticNet for regression/forecasting tasks (or ElasticNetCV if cross-validation is enabled). @@ -2255,6 +3506,23 @@ func PossibleStatusValues() []Status { } } +type StatusMessageLevel string + +const ( + StatusMessageLevelError StatusMessageLevel = "Error" + StatusMessageLevelInformation StatusMessageLevel = "Information" + StatusMessageLevelWarning StatusMessageLevel = "Warning" +) + +// PossibleStatusMessageLevelValues returns the possible values for the StatusMessageLevel const type. +func PossibleStatusMessageLevelValues() []StatusMessageLevel { + return []StatusMessageLevel{ + StatusMessageLevelError, + StatusMessageLevelInformation, + StatusMessageLevelWarning, + } +} + // StochasticOptimizer - Stochastic optimizer for image models. type StochasticOptimizer string @@ -2414,6 +3682,43 @@ func PossibleTaskTypeValues() []TaskType { } } +// TextAnnotationType - Annotation type of text data. +type TextAnnotationType string + +const ( + TextAnnotationTypeClassification TextAnnotationType = "Classification" + TextAnnotationTypeNamedEntityRecognition TextAnnotationType = "NamedEntityRecognition" +) + +// PossibleTextAnnotationTypeValues returns the possible values for the TextAnnotationType const type. 
+func PossibleTextAnnotationTypeValues() []TextAnnotationType { + return []TextAnnotationType{ + TextAnnotationTypeClassification, + TextAnnotationTypeNamedEntityRecognition, + } +} + +// TrainingMode - Training mode dictates whether to use distributed training or not +type TrainingMode string + +const ( + // TrainingModeAuto - Auto mode + TrainingModeAuto TrainingMode = "Auto" + // TrainingModeDistributed - Distributed training mode + TrainingModeDistributed TrainingMode = "Distributed" + // TrainingModeNonDistributed - Non distributed training mode + TrainingModeNonDistributed TrainingMode = "NonDistributed" +) + +// PossibleTrainingModeValues returns the possible values for the TrainingMode const type. +func PossibleTrainingModeValues() []TrainingMode { + return []TrainingMode{ + TrainingModeAuto, + TrainingModeDistributed, + TrainingModeNonDistributed, + } +} + type TriggerType string const ( @@ -2565,17 +3870,37 @@ func PossibleValidationMetricTypeValues() []ValidationMetricType { } } -// ValueFormat - format for the workspace connection value -type ValueFormat string +// VolumeDefinitionType - Type of Volume Definition. Possible Values: bind,volume,tmpfs,npipe +type VolumeDefinitionType string + +const ( + VolumeDefinitionTypeBind VolumeDefinitionType = "bind" + VolumeDefinitionTypeNpipe VolumeDefinitionType = "npipe" + VolumeDefinitionTypeTmpfs VolumeDefinitionType = "tmpfs" + VolumeDefinitionTypeVolume VolumeDefinitionType = "volume" +) + +// PossibleVolumeDefinitionTypeValues returns the possible values for the VolumeDefinitionType const type. +func PossibleVolumeDefinitionTypeValues() []VolumeDefinitionType { + return []VolumeDefinitionType{ + VolumeDefinitionTypeBind, + VolumeDefinitionTypeNpipe, + VolumeDefinitionTypeTmpfs, + VolumeDefinitionTypeVolume, + } +} + +// WebhookType - Enum to determine the webhook callback service type. +type WebhookType string const ( - ValueFormatJSON ValueFormat = "JSON" + WebhookTypeAzureDevOps WebhookType = "AzureDevOps" ) -// PossibleValueFormatValues returns the possible values for the ValueFormat const type. -func PossibleValueFormatValues() []ValueFormat { - return []ValueFormat{ - ValueFormatJSON, +// PossibleWebhookTypeValues returns the possible values for the WebhookType const type. +func PossibleWebhookTypeValues() []WebhookType { + return []WebhookType{ + WebhookTypeAzureDevOps, } } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client.go index ef6431306899..3a6f597b0f25 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client.go @@ -47,7 +47,7 @@ func NewDataContainersClient(subscriptionID string, credential azcore.TokenCrede // CreateOrUpdate - Create or update container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. 
@@ -93,7 +93,7 @@ func (client *DataContainersClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -111,7 +111,7 @@ func (client *DataContainersClient) createOrUpdateHandleResponse(resp *http.Resp // Delete - Delete container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -155,7 +155,7 @@ func (client *DataContainersClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -164,7 +164,7 @@ func (client *DataContainersClient) deleteCreateRequest(ctx context.Context, res // Get - Get container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -208,7 +208,7 @@ func (client *DataContainersClient) getCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *DataContainersClient) getHandleResponse(resp *http.Response) (Data // NewListPager - List data containers. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - DataContainersClientListOptions contains the optional parameters for the DataContainersClient.NewListPager method. @@ -277,7 +277,7 @@ func (client *DataContainersClient) listCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client_example_test.go deleted file mode 100644 index 92aa8b6126b6..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/datacontainers_client_example_test.go +++ /dev/null @@ -1,219 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. 
-// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataContainer/list.json -func ExampleDataContainersClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewDataContainersClient().NewListPager("testrg123", "workspace123", &armmachinelearning.DataContainersClientListOptions{Skip: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.DataContainerResourceArmPaginatedResult = armmachinelearning.DataContainerResourceArmPaginatedResult{ - // Value: []*armmachinelearning.DataContainer{ - // { - // Name: to.Ptr("datastore123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/data"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/data/datacontainer123"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.DataContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "properties1": to.Ptr("value1"), - // "properties2": to.Ptr("value2"), - // }, - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // DataType: to.Ptr(armmachinelearning.DataType("UriFile")), - // }, - // }, - // { - // Name: to.Ptr("datastore124"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/data"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/data/datacontainer124"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // 
CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.DataContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "properties1": to.Ptr("value1"), - // "properties2": to.Ptr("value2"), - // }, - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // DataType: to.Ptr(armmachinelearning.DataType("UriFile")), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataContainer/delete.json -func ExampleDataContainersClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewDataContainersClient().Delete(ctx, "testrg123", "workspace123", "datacontainer123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataContainer/get.json -func ExampleDataContainersClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDataContainersClient().Get(ctx, "testrg123", "workspace123", "datacontainer123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.DataContainer = armmachinelearning.DataContainer{ - // Name: to.Ptr("datacontainer123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/data"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/data/datacontainer123"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.DataContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "properties1": to.Ptr("value1"), - // "properties2": to.Ptr("value2"), - // }, - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // DataType: to.Ptr(armmachinelearning.DataType("UriFile")), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataContainer/createOrUpdate.json -func ExampleDataContainersClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDataContainersClient().CreateOrUpdate(ctx, "testrg123", "workspace123", "datacontainer123", armmachinelearning.DataContainer{ - Properties: &armmachinelearning.DataContainerProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "properties1": to.Ptr("value1"), - "properties2": to.Ptr("value2"), - }, - Tags: map[string]*string{ - "tag1": to.Ptr("value1"), - "tag2": to.Ptr("value2"), - }, - DataType: to.Ptr(armmachinelearning.DataType("UriFile")), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.DataContainer = armmachinelearning.DataContainer{ - // Name: to.Ptr("datacontainer123"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/data"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/data/datacontainer123"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.DataContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "properties1": to.Ptr("value1"), - // "properties2": to.Ptr("value2"), - // }, - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // DataType: to.Ptr(armmachinelearning.DataType("UriFile")), - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client.go index 5ccc88087d0c..1830beca01c2 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client.go @@ -48,7 +48,7 @@ func NewDatastoresClient(subscriptionID string, credential azcore.TokenCredentia // CreateOrUpdate - Create or update datastore. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Datastore name. @@ -94,7 +94,7 @@ func (client *DatastoresClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.SkipValidation != nil { reqQP.Set("skipValidation", strconv.FormatBool(*options.SkipValidation)) } @@ -115,7 +115,7 @@ func (client *DatastoresClient) createOrUpdateHandleResponse(resp *http.Response // Delete - Delete datastore. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Datastore name. @@ -159,7 +159,7 @@ func (client *DatastoresClient) deleteCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -168,7 +168,7 @@ func (client *DatastoresClient) deleteCreateRequest(ctx context.Context, resourc // Get - Get datastore. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Datastore name. @@ -212,7 +212,7 @@ func (client *DatastoresClient) getCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -229,7 +229,7 @@ func (client *DatastoresClient) getHandleResponse(resp *http.Response) (Datastor // NewListPager - List datastores. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - DatastoresClientListOptions contains the optional parameters for the DatastoresClient.NewListPager method. @@ -281,7 +281,7 @@ func (client *DatastoresClient) listCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } @@ -320,7 +320,7 @@ func (client *DatastoresClient) listHandleResponse(resp *http.Response) (Datasto // ListSecrets - Get datastore secrets. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Datastore name. @@ -364,7 +364,7 @@ func (client *DatastoresClient) listSecretsCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client_example_test.go deleted file mode 100644 index 1a5a5f4758d6..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/datastores_client_example_test.go +++ /dev/null @@ -1,449 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/list.json -func ExampleDatastoresClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewDatastoresClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.DatastoresClientListOptions{Skip: nil, - Count: to.Ptr[int32](1), - IsDefault: to.Ptr(false), - Names: []string{ - "string"}, - SearchText: to.Ptr("string"), - OrderBy: to.Ptr("string"), - OrderByAsc: to.Ptr(false), - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.DatastoreResourceArmPaginatedResult = armmachinelearning.DatastoreResourceArmPaginatedResult{ - // Value: []*armmachinelearning.Datastore{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.AzureBlobDatastore{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Credentials: &armmachinelearning.AccountKeyDatastoreCredentials{ - // CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeAccountKey), - // }, - // DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureBlob), - // IsDefault: to.Ptr(false), - // AccountName: to.Ptr("string"), - // ContainerName: to.Ptr("string"), - // Endpoint: to.Ptr("core.windows.net"), - // Protocol: to.Ptr("https"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/delete.json -func ExampleDatastoresClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := 
armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewDatastoresClient().Delete(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/get.json -func ExampleDatastoresClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDatastoresClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Datastore = armmachinelearning.Datastore{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.AzureBlobDatastore{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Credentials: &armmachinelearning.AccountKeyDatastoreCredentials{ - // CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeAccountKey), - // }, - // DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureBlob), - // IsDefault: to.Ptr(false), - // AccountName: to.Ptr("string"), - // ContainerName: to.Ptr("string"), - // Endpoint: to.Ptr("core.windows.net"), - // Protocol: to.Ptr("https"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/AzureDataLakeGen1WServicePrincipal/createOrUpdate.json -func ExampleDatastoresClient_CreateOrUpdate_createOrUpdateDatastoreAzureDataLakeGen1WServicePrincipal() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDatastoresClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.Datastore{ - Properties: 
&armmachinelearning.AzureDataLakeGen1Datastore{ - Description: to.Ptr("string"), - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - Credentials: &armmachinelearning.ServicePrincipalDatastoreCredentials{ - CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeServicePrincipal), - AuthorityURL: to.Ptr("string"), - ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - ResourceURL: to.Ptr("string"), - Secrets: &armmachinelearning.ServicePrincipalDatastoreSecrets{ - SecretsType: to.Ptr(armmachinelearning.SecretsTypeServicePrincipal), - ClientSecret: to.Ptr("string"), - }, - TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - }, - DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureDataLakeGen1), - StoreName: to.Ptr("string"), - }, - }, &armmachinelearning.DatastoresClientCreateOrUpdateOptions{SkipValidation: to.Ptr(false)}) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Datastore = armmachinelearning.Datastore{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.AzureDataLakeGen1Datastore{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Credentials: &armmachinelearning.ServicePrincipalDatastoreCredentials{ - // CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeServicePrincipal), - // AuthorityURL: to.Ptr("string"), - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // ResourceURL: to.Ptr("string"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureDataLakeGen1), - // StoreName: to.Ptr("string"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/AzureDataLakeGen2WServicePrincipal/createOrUpdate.json -func ExampleDatastoresClient_CreateOrUpdate_createOrUpdateDatastoreAzureDataLakeGen2WServicePrincipal() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDatastoresClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.Datastore{ - Properties: &armmachinelearning.AzureDataLakeGen2Datastore{ - Description: to.Ptr("string"), - Tags: 
map[string]*string{ - "string": to.Ptr("string"), - }, - Credentials: &armmachinelearning.ServicePrincipalDatastoreCredentials{ - CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeServicePrincipal), - AuthorityURL: to.Ptr("string"), - ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - ResourceURL: to.Ptr("string"), - Secrets: &armmachinelearning.ServicePrincipalDatastoreSecrets{ - SecretsType: to.Ptr(armmachinelearning.SecretsTypeServicePrincipal), - ClientSecret: to.Ptr("string"), - }, - TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - }, - DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureDataLakeGen2), - AccountName: to.Ptr("string"), - Endpoint: to.Ptr("string"), - Filesystem: to.Ptr("string"), - Protocol: to.Ptr("string"), - }, - }, &armmachinelearning.DatastoresClientCreateOrUpdateOptions{SkipValidation: to.Ptr(false)}) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Datastore = armmachinelearning.Datastore{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.AzureDataLakeGen2Datastore{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Credentials: &armmachinelearning.ServicePrincipalDatastoreCredentials{ - // CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeServicePrincipal), - // AuthorityURL: to.Ptr("string"), - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // ResourceURL: to.Ptr("string"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureDataLakeGen2), - // AccountName: to.Ptr("string"), - // Endpoint: to.Ptr("string"), - // Filesystem: to.Ptr("string"), - // Protocol: to.Ptr("string"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/AzureFileWAccountKey/createOrUpdate.json -func ExampleDatastoresClient_CreateOrUpdate_createOrUpdateDatastoreAzureFileStoreWAccountKey() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDatastoresClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.Datastore{ - Properties: 
&armmachinelearning.AzureFileDatastore{ - Description: to.Ptr("string"), - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - Credentials: &armmachinelearning.AccountKeyDatastoreCredentials{ - CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeAccountKey), - Secrets: &armmachinelearning.AccountKeyDatastoreSecrets{ - SecretsType: to.Ptr(armmachinelearning.SecretsTypeAccountKey), - Key: to.Ptr("string"), - }, - }, - DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureFile), - AccountName: to.Ptr("string"), - Endpoint: to.Ptr("string"), - FileShareName: to.Ptr("string"), - Protocol: to.Ptr("string"), - }, - }, &armmachinelearning.DatastoresClientCreateOrUpdateOptions{SkipValidation: to.Ptr(false)}) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Datastore = armmachinelearning.Datastore{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.AzureFileDatastore{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Credentials: &armmachinelearning.AccountKeyDatastoreCredentials{ - // CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeAccountKey), - // }, - // DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureFile), - // AccountName: to.Ptr("string"), - // Endpoint: to.Ptr("string"), - // FileShareName: to.Ptr("string"), - // Protocol: to.Ptr("string"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/AzureBlobWAccountKey/createOrUpdate.json -func ExampleDatastoresClient_CreateOrUpdate_createOrUpdateDatastoreAzureBlobWAccountKey() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDatastoresClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.Datastore{ - Properties: &armmachinelearning.AzureBlobDatastore{ - Description: to.Ptr("string"), - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - Credentials: &armmachinelearning.AccountKeyDatastoreCredentials{ - CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeAccountKey), - Secrets: &armmachinelearning.AccountKeyDatastoreSecrets{ - SecretsType: 
to.Ptr(armmachinelearning.SecretsTypeAccountKey), - Key: to.Ptr("string"), - }, - }, - DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureBlob), - AccountName: to.Ptr("string"), - ContainerName: to.Ptr("string"), - Endpoint: to.Ptr("core.windows.net"), - Protocol: to.Ptr("https"), - }, - }, &armmachinelearning.DatastoresClientCreateOrUpdateOptions{SkipValidation: to.Ptr(false)}) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Datastore = armmachinelearning.Datastore{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.AzureBlobDatastore{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Credentials: &armmachinelearning.AccountKeyDatastoreCredentials{ - // CredentialsType: to.Ptr(armmachinelearning.CredentialsTypeAccountKey), - // }, - // DatastoreType: to.Ptr(armmachinelearning.DatastoreTypeAzureBlob), - // IsDefault: to.Ptr(false), - // AccountName: to.Ptr("string"), - // ContainerName: to.Ptr("string"), - // Endpoint: to.Ptr("core.windows.net"), - // Protocol: to.Ptr("https"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Datastore/listSecrets.json -func ExampleDatastoresClient_ListSecrets() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDatastoresClient().ListSecrets(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res = armmachinelearning.DatastoresClientListSecretsResponse{ - // DatastoreSecretsClassification: &armmachinelearning.AccountKeyDatastoreSecrets{ - // SecretsType: to.Ptr(armmachinelearning.SecretsTypeAccountKey), - // Key: to.Ptr("string"), - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client.go index 81af82508f27..c958deb92db1 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client.go @@ -48,7 +48,7 @@ func NewDataVersionsClient(subscriptionID string, credential azcore.TokenCredent // CreateOrUpdate - Create or update version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -99,7 +99,7 @@ func (client *DataVersionsClient) createOrUpdateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -117,7 +117,7 @@ func (client *DataVersionsClient) createOrUpdateHandleResponse(resp *http.Respon // Delete - Delete version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -166,7 +166,7 @@ func (client *DataVersionsClient) deleteCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -175,7 +175,7 @@ func (client *DataVersionsClient) deleteCreateRequest(ctx context.Context, resou // Get - Get version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. @@ -224,7 +224,7 @@ func (client *DataVersionsClient) getCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -241,7 +241,7 @@ func (client *DataVersionsClient) getHandleResponse(resp *http.Response) (DataVe // NewListPager - List data versions in the data container // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - workspaceName - Name of Azure Machine Learning workspace. // - name - Data container's name @@ -298,7 +298,7 @@ func (client *DataVersionsClient) listCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.OrderBy != nil { reqQP.Set("$orderBy", *options.OrderBy) } @@ -314,6 +314,9 @@ func (client *DataVersionsClient) listCreateRequest(ctx context.Context, resourc if options != nil && options.ListViewType != nil { reqQP.Set("listViewType", string(*options.ListViewType)) } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client_example_test.go deleted file mode 100644 index 2fdcdd57f145..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/dataversions_client_example_test.go +++ /dev/null @@ -1,197 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataVersionBase/list.json -func ExampleDataVersionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewDataVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string", &armmachinelearning.DataVersionsClientListOptions{OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Skip: nil, - Tags: to.Ptr("string"), - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
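// Illustrative sketch, not generated code from this diff: the 2023-06-01-preview api-version adds a Stage
// filter to DataVersionsClientListOptions, forwarded as the "stage" query parameter shown above. The resource
// group, workspace, container name, and the "Production" label below are placeholder values.
pager := clientFactory.NewDataVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string",
	&armmachinelearning.DataVersionsClientListOptions{
		Stage: to.Ptr("Production"), // assumed stage label; any stage string used in the workspace works here
	})
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatalf("failed to advance page: %v", err)
	}
	_ = page // inspect page.Value as needed
}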
- // page.DataVersionBaseResourceArmPaginatedResult = armmachinelearning.DataVersionBaseResourceArmPaginatedResult{ - // Value: []*armmachinelearning.DataVersionBase{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.URIFileDataVersion{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // DataType: to.Ptr(armmachinelearning.DataTypeURIFile), - // DataURI: to.Ptr("string"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataVersionBase/delete.json -func ExampleDataVersionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewDataVersionsClient().Delete(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataVersionBase/get.json -func ExampleDataVersionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDataVersionsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.DataVersionBase = armmachinelearning.DataVersionBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.URIFileDataVersion{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // DataType: to.Ptr(armmachinelearning.DataTypeURIFile), - // DataURI: to.Ptr("string"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/DataVersionBase/createOrUpdate.json -func ExampleDataVersionsClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewDataVersionsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", "string", armmachinelearning.DataVersionBase{ - Properties: &armmachinelearning.URIFileDataVersion{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - IsAnonymous: to.Ptr(false), - DataType: to.Ptr(armmachinelearning.DataTypeURIFile), - DataURI: to.Ptr("string"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.DataVersionBase = armmachinelearning.DataVersionBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.URIFileDataVersion{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // DataType: to.Ptr(armmachinelearning.DataTypeURIFile), - // DataURI: to.Ptr("string"), - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client.go index 3748216d0062..2543d695bf2c 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client.go @@ -47,7 +47,7 @@ func NewEnvironmentContainersClient(subscriptionID string, credential azcore.Tok // CreateOrUpdate - Create or update container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -93,7 +93,7 @@ func (client *EnvironmentContainersClient) createOrUpdateCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -111,7 +111,7 @@ func (client *EnvironmentContainersClient) createOrUpdateHandleResponse(resp *ht // Delete - Delete container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -156,7 +156,7 @@ func (client *EnvironmentContainersClient) deleteCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -165,7 +165,7 @@ func (client *EnvironmentContainersClient) deleteCreateRequest(ctx context.Conte // Get - Get container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. 
The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -210,7 +210,7 @@ func (client *EnvironmentContainersClient) getCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -227,7 +227,7 @@ func (client *EnvironmentContainersClient) getHandleResponse(resp *http.Response // NewListPager - List environment containers. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - EnvironmentContainersClientListOptions contains the optional parameters for the EnvironmentContainersClient.NewListPager @@ -280,7 +280,7 @@ func (client *EnvironmentContainersClient) listCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client_example_test.go deleted file mode 100644 index 463bd10a1f73..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentcontainers_client_example_test.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentContainer/list.json -func ExampleEnvironmentContainersClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewEnvironmentContainersClient().NewListPager("testrg123", "testworkspace", &armmachinelearning.EnvironmentContainersClientListOptions{Skip: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. 
- _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.EnvironmentContainerResourceArmPaginatedResult = armmachinelearning.EnvironmentContainerResourceArmPaginatedResult{ - // Value: []*armmachinelearning.EnvironmentContainer{ - // { - // Name: to.Ptr("testEnvironment"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/environments"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/environments/testEnvironment"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.EnvironmentContainerProperties{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentContainer/delete.json -func ExampleEnvironmentContainersClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewEnvironmentContainersClient().Delete(ctx, "testrg123", "testworkspace", "testContainer", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentContainer/get.json -func ExampleEnvironmentContainersClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewEnvironmentContainersClient().Get(ctx, "testrg123", "testworkspace", "testEnvironment", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.EnvironmentContainer = armmachinelearning.EnvironmentContainer{ - // Name: to.Ptr("testEnvironment"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/environments"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/environments/testEnvironment"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.EnvironmentContainerProperties{ - // Description: to.Ptr("string"), - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentContainer/createOrUpdate.json -func ExampleEnvironmentContainersClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewEnvironmentContainersClient().CreateOrUpdate(ctx, "testrg123", "testworkspace", "testEnvironment", armmachinelearning.EnvironmentContainer{ - Properties: &armmachinelearning.EnvironmentContainerProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "additionalProp1": to.Ptr("string"), - "additionalProp2": to.Ptr("string"), - "additionalProp3": to.Ptr("string"), - }, - Tags: map[string]*string{ - "additionalProp1": to.Ptr("string"), - "additionalProp2": to.Ptr("string"), - "additionalProp3": to.Ptr("string"), - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.EnvironmentContainer = armmachinelearning.EnvironmentContainer{ - // Name: to.Ptr("testEnvironment"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/environments"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/environments/testEnvironment"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-04T03:39:11.300Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-04T03:39:11.300Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.EnvironmentContainerProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "additionalProp1": to.Ptr("string"), - // "additionalProp2": to.Ptr("string"), - // "additionalProp3": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "additionalProp1": to.Ptr("string"), - // "additionalProp2": to.Ptr("string"), - // "additionalProp3": to.Ptr("string"), - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client.go index 5cc6411d1b64..06bd7cfceb77 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client.go @@ -48,7 +48,7 @@ func NewEnvironmentVersionsClient(subscriptionID string, credential azcore.Token // CreateOrUpdate - Creates or updates an EnvironmentVersion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Name of EnvironmentVersion. This is case-sensitive. @@ -99,7 +99,7 @@ func (client *EnvironmentVersionsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -117,7 +117,7 @@ func (client *EnvironmentVersionsClient) createOrUpdateHandleResponse(resp *http // Delete - Delete version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. 
@@ -167,7 +167,7 @@ func (client *EnvironmentVersionsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -176,7 +176,7 @@ func (client *EnvironmentVersionsClient) deleteCreateRequest(ctx context.Context // Get - Get version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -225,7 +225,7 @@ func (client *EnvironmentVersionsClient) getCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -242,7 +242,7 @@ func (client *EnvironmentVersionsClient) getHandleResponse(resp *http.Response) // NewListPager - List versions. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -300,7 +300,7 @@ func (client *EnvironmentVersionsClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.OrderBy != nil { reqQP.Set("$orderBy", *options.OrderBy) } @@ -313,6 +313,9 @@ func (client *EnvironmentVersionsClient) listCreateRequest(ctx context.Context, if options != nil && options.ListViewType != nil { reqQP.Set("listViewType", string(*options.ListViewType)) } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client_example_test.go deleted file mode 100644 index be4e3ab10faa..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/environmentversions_client_example_test.go +++ /dev/null @@ -1,271 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
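// Illustrative sketch, not generated code from this diff: EnvironmentVersionsClientListOptions gains the same
// Stage filter, also sent as the "stage" query parameter under 2023-06-01-preview; the names and the stage
// label below are placeholders.
pager := clientFactory.NewEnvironmentVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string",
	&armmachinelearning.EnvironmentVersionsClientListOptions{
		Stage: to.Ptr("Production"), // assumed stage label
	})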
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentVersion/list.json -func ExampleEnvironmentVersionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewEnvironmentVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string", &armmachinelearning.EnvironmentVersionsClientListOptions{OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Skip: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.EnvironmentVersionResourceArmPaginatedResult = armmachinelearning.EnvironmentVersionResourceArmPaginatedResult{ - // Value: []*armmachinelearning.EnvironmentVersion{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.EnvironmentVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // Build: &armmachinelearning.BuildContext{ - // ContextURI: to.Ptr("https://storage-account.blob.core.windows.net/azureml/DockerBuildContext/95ddede6b9b8c4e90472db3acd0a8d28/"), - // DockerfilePath: to.Ptr("prod/Dockerfile"), - // }, - // CondaFile: to.Ptr("string"), - // EnvironmentType: to.Ptr(armmachinelearning.EnvironmentTypeCurated), - // Image: to.Ptr("docker.io/tensorflow/serving:latest"), - // InferenceConfig: &armmachinelearning.InferenceContainerProperties{ - // LivenessRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // ReadinessRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // ScoringRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // }, - // }, - // }}, - // } - } 
-} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentVersion/delete.json -func ExampleEnvironmentVersionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewEnvironmentVersionsClient().Delete(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentVersion/get.json -func ExampleEnvironmentVersionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewEnvironmentVersionsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.EnvironmentVersion = armmachinelearning.EnvironmentVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.EnvironmentVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // Build: &armmachinelearning.BuildContext{ - // ContextURI: to.Ptr("https://storage-account.blob.core.windows.net/azureml/DockerBuildContext/95ddede6b9b8c4e90472db3acd0a8d28/"), - // DockerfilePath: to.Ptr("prod/Dockerfile"), - // }, - // CondaFile: to.Ptr("string"), - // EnvironmentType: to.Ptr(armmachinelearning.EnvironmentTypeCurated), - // Image: to.Ptr("docker.io/tensorflow/serving:latest"), - // InferenceConfig: &armmachinelearning.InferenceContainerProperties{ - // LivenessRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // ReadinessRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // ScoringRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/EnvironmentVersion/createOrUpdate.json -func ExampleEnvironmentVersionsClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewEnvironmentVersionsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", "string", armmachinelearning.EnvironmentVersion{ - Properties: &armmachinelearning.EnvironmentVersionProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - IsAnonymous: to.Ptr(false), - Build: &armmachinelearning.BuildContext{ - ContextURI: to.Ptr("https://storage-account.blob.core.windows.net/azureml/DockerBuildContext/95ddede6b9b8c4e90472db3acd0a8d28/"), - DockerfilePath: to.Ptr("prod/Dockerfile"), - }, - CondaFile: to.Ptr("string"), - Image: to.Ptr("docker.io/tensorflow/serving:latest"), - InferenceConfig: &armmachinelearning.InferenceContainerProperties{ - LivenessRoute: &armmachinelearning.Route{ - Path: to.Ptr("string"), - Port: to.Ptr[int32](1), - }, - ReadinessRoute: &armmachinelearning.Route{ - Path: to.Ptr("string"), - Port: to.Ptr[int32](1), - }, - ScoringRoute: &armmachinelearning.Route{ - Path: to.Ptr("string"), - Port: 
to.Ptr[int32](1), - }, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.EnvironmentVersion = armmachinelearning.EnvironmentVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.EnvironmentVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // Build: &armmachinelearning.BuildContext{ - // ContextURI: to.Ptr("https://storage-account.blob.core.windows.net/azureml/DockerBuildContext/95ddede6b9b8c4e90472db3acd0a8d28/"), - // DockerfilePath: to.Ptr("prod/Dockerfile"), - // }, - // CondaFile: to.Ptr("string"), - // EnvironmentType: to.Ptr(armmachinelearning.EnvironmentTypeCurated), - // Image: to.Ptr("docker.io/tensorflow/serving:latest"), - // InferenceConfig: &armmachinelearning.InferenceContainerProperties{ - // LivenessRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // ReadinessRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // ScoringRoute: &armmachinelearning.Route{ - // Path: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // }, - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/features_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/features_client.go new file mode 100644 index 000000000000..62f4fa634648 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/features_client.go @@ -0,0 +1,208 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// FeaturesClient contains the methods for the Features group. +// Don't use this type directly, use NewFeaturesClient() instead. +type FeaturesClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFeaturesClient creates a new instance of FeaturesClient with the specified values. 
+// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFeaturesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FeaturesClient, error) { + cl, err := arm.NewClient(moduleName+".FeaturesClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FeaturesClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// Get - Get feature. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - featuresetName - Feature set name. This is case-sensitive. +// - featuresetVersion - Feature set version identifier. This is case-sensitive. +// - featureName - Feature Name. This is case-sensitive. +// - options - FeaturesClientGetOptions contains the optional parameters for the FeaturesClient.Get method. +func (client *FeaturesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, featuresetName string, featuresetVersion string, featureName string, options *FeaturesClientGetOptions) (FeaturesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, workspaceName, featuresetName, featuresetVersion, featureName, options) + if err != nil { + return FeaturesClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
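// Usage sketch (editorial addition, not part of the generated diff): fetching a single feature with
// FeaturesClient.Get. The module import path for this 4.0.0-beta.1 release is assumed to be
// ".../armmachinelearning/v4", and the subscription, resource group, workspace, featureset and
// feature names are placeholders.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesClient_Get_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	client, err := armmachinelearning.NewFeaturesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Get(ctx, resourceGroupName, workspaceName, featuresetName, featuresetVersion, featureName, options)
	res, err := client.Get(context.Background(), "test-rg", "my-aml-workspace", "my-featureset", "1", "my-feature", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	_ = res.Feature // the response embeds the Feature resource
}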
+func (client *FeaturesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, featuresetName string, featuresetVersion string, featureName string, options *FeaturesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features/{featureName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if featuresetName == "" { + return nil, errors.New("parameter featuresetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{featuresetName}", url.PathEscape(featuresetName)) + if featuresetVersion == "" { + return nil, errors.New("parameter featuresetVersion cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{featuresetVersion}", url.PathEscape(featuresetVersion)) + if featureName == "" { + return nil, errors.New("parameter featureName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{featureName}", url.PathEscape(featureName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *FeaturesClient) getHandleResponse(resp *http.Response) (FeaturesClientGetResponse, error) { + result := FeaturesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Feature); err != nil { + return FeaturesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List Features. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - featuresetName - Featureset name. This is case-sensitive. +// - featuresetVersion - Featureset Version identifier. This is case-sensitive. +// - options - FeaturesClientListOptions contains the optional parameters for the FeaturesClient.NewListPager method. 
+func (client *FeaturesClient) NewListPager(resourceGroupName string, workspaceName string, featuresetName string, featuresetVersion string, options *FeaturesClientListOptions) *runtime.Pager[FeaturesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[FeaturesClientListResponse]{ + More: func(page FeaturesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FeaturesClientListResponse) (FeaturesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, featuresetName, featuresetVersion, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FeaturesClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *FeaturesClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, featuresetName string, featuresetVersion string, options *FeaturesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if featuresetName == "" { + return nil, errors.New("parameter featuresetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{featuresetName}", url.PathEscape(featuresetName)) + if featuresetVersion == "" { + return nil, errors.New("parameter featuresetVersion cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{featuresetVersion}", url.PathEscape(featuresetVersion)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Tags != nil { + reqQP.Set("tags", *options.Tags) + } + if options != nil && options.FeatureName != nil { + reqQP.Set("featureName", *options.FeatureName) + } + if options != nil && options.Description != nil { + reqQP.Set("description", *options.Description) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
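// Usage sketch (editorial addition, not part of the generated diff): paging through features with
// FeaturesClient.NewListPager. The optional filters shown (featureName, description, $skip) mirror the
// query parameters set in listCreateRequest; values and the v4 module import path are assumptions.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesClient_NewListPager_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	client, err := armmachinelearning.NewFeaturesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	pager := client.NewListPager("test-rg", "my-aml-workspace", "my-featureset", "1",
		&armmachinelearning.FeaturesClientListOptions{
			FeatureName: to.Ptr("my-feature"),
			Description: nil,
			Skip:        nil,
		})
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, feature := range page.Value {
			_ = feature // each entry is a feature resource in the paginated result
		}
	}
}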
+func (client *FeaturesClient) listHandleResponse(resp *http.Response) (FeaturesClientListResponse, error) { + result := FeaturesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeatureResourceArmPaginatedResult); err != nil { + return FeaturesClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/featuresetcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/featuresetcontainers_client.go new file mode 100644 index 000000000000..4a75fe63e853 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/featuresetcontainers_client.go @@ -0,0 +1,345 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// FeaturesetContainersClient contains the methods for the FeaturesetContainers group. +// Don't use this type directly, use NewFeaturesetContainersClient() instead. +type FeaturesetContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFeaturesetContainersClient creates a new instance of FeaturesetContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFeaturesetContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FeaturesetContainersClient, error) { + cl, err := arm.NewClient(moduleName+".FeaturesetContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FeaturesetContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - body - Container entity to create or update. +// - options - FeaturesetContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturesetContainersClient.BeginCreateOrUpdate +// method. 
+func (client *FeaturesetContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, body FeaturesetContainer, options *FeaturesetContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturesetContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, name, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturesetContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturesetContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *FeaturesetContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, body FeaturesetContainer, options *FeaturesetContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, name, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *FeaturesetContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, body FeaturesetContainer, options *FeaturesetContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. 
The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - options - FeaturesetContainersClientBeginDeleteOptions contains the optional parameters for the FeaturesetContainersClient.BeginDelete +// method. +func (client *FeaturesetContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturesetContainersClientBeginDeleteOptions) (*runtime.Poller[FeaturesetContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, workspaceName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturesetContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturesetContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *FeaturesetContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturesetContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *FeaturesetContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturesetContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// GetEntity - Get container. +// If the operation fails it returns an *azcore.ResponseError type. 
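// Usage sketch (editorial addition, not part of the generated diff): creating and deleting a featureset
// container as long-running operations. The FeaturesetContainerProperties type and its Description field
// follow the common asset-container shape and are assumptions; only the Begin* signatures and the v4
// module path (also assumed) come from this release.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesetContainersClient_BeginCreateOrUpdate_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewFeaturesetContainersClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Create or update the container, then block until the LRO reaches a terminal state.
	createPoller, err := client.BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "my-featureset",
		armmachinelearning.FeaturesetContainer{
			Properties: &armmachinelearning.FeaturesetContainerProperties{ // assumed type and field names
				Description: to.Ptr("transactions featureset"),
			},
		}, nil)
	if err != nil {
		log.Fatalf("failed to start create: %v", err)
	}
	if _, err = createPoller.PollUntilDone(ctx, nil); err != nil {
		log.Fatalf("create did not complete: %v", err)
	}
	// Delete the container; BeginDelete also returns a poller.
	deletePoller, err := client.BeginDelete(ctx, "test-rg", "my-aml-workspace", "my-featureset", nil)
	if err != nil {
		log.Fatalf("failed to start delete: %v", err)
	}
	if _, err = deletePoller.PollUntilDone(ctx, nil); err != nil {
		log.Fatalf("delete did not complete: %v", err)
	}
}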
+// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - options - FeaturesetContainersClientGetEntityOptions contains the optional parameters for the FeaturesetContainersClient.GetEntity +// method. +func (client *FeaturesetContainersClient) GetEntity(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturesetContainersClientGetEntityOptions) (FeaturesetContainersClientGetEntityResponse, error) { + req, err := client.getEntityCreateRequest(ctx, resourceGroupName, workspaceName, name, options) + if err != nil { + return FeaturesetContainersClientGetEntityResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesetContainersClientGetEntityResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesetContainersClientGetEntityResponse{}, runtime.NewResponseError(resp) + } + return client.getEntityHandleResponse(resp) +} + +// getEntityCreateRequest creates the GetEntity request. +func (client *FeaturesetContainersClient) getEntityCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturesetContainersClientGetEntityOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getEntityHandleResponse handles the GetEntity response. +func (client *FeaturesetContainersClient) getEntityHandleResponse(resp *http.Response) (FeaturesetContainersClientGetEntityResponse, error) { + result := FeaturesetContainersClientGetEntityResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturesetContainer); err != nil { + return FeaturesetContainersClientGetEntityResponse{}, err + } + return result, nil +} + +// NewListPager - List featurestore entity containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. 
+// - options - FeaturesetContainersClientListOptions contains the optional parameters for the FeaturesetContainersClient.NewListPager +// method. +func (client *FeaturesetContainersClient) NewListPager(resourceGroupName string, workspaceName string, options *FeaturesetContainersClientListOptions) *runtime.Pager[FeaturesetContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[FeaturesetContainersClientListResponse]{ + More: func(page FeaturesetContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FeaturesetContainersClientListResponse) (FeaturesetContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FeaturesetContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesetContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesetContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *FeaturesetContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *FeaturesetContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Tags != nil { + reqQP.Set("tags", *options.Tags) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + if options != nil && options.PageSize != nil { + reqQP.Set("pageSize", strconv.FormatInt(int64(*options.PageSize), 10)) + } + if options != nil && options.Name != nil { + reqQP.Set("name", *options.Name) + } + if options != nil && options.Description != nil { + reqQP.Set("description", *options.Description) + } + if options != nil && options.CreatedBy != nil { + reqQP.Set("createdBy", *options.CreatedBy) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
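// Usage sketch (editorial addition, not part of the generated diff): reading one container with GetEntity
// and listing containers with NewListPager. The ListViewType enum value is assumed to carry over from
// earlier releases of this package (ListViewTypeActiveOnly); the v4 import path and resource names are
// placeholders.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesetContainersClient_NewListPager_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewFeaturesetContainersClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Fetch a single container by name.
	entity, err := client.GetEntity(ctx, "test-rg", "my-aml-workspace", "my-featureset", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	_ = entity.FeaturesetContainer
	// List containers in the workspace, filtering to active (non-archived) entries.
	pager := client.NewListPager("test-rg", "my-aml-workspace",
		&armmachinelearning.FeaturesetContainersClientListOptions{
			ListViewType: to.Ptr(armmachinelearning.ListViewTypeActiveOnly),
		})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, container := range page.Value {
			_ = container
		}
	}
}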
+func (client *FeaturesetContainersClient) listHandleResponse(resp *http.Response) (FeaturesetContainersClientListResponse, error) { + result := FeaturesetContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturesetContainerResourceArmPaginatedResult); err != nil { + return FeaturesetContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/featuresetversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/featuresetversions_client.go new file mode 100644 index 000000000000..287ecc3b44af --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/featuresetversions_client.go @@ -0,0 +1,540 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// FeaturesetVersionsClient contains the methods for the FeaturesetVersions group. +// Don't use this type directly, use NewFeaturesetVersionsClient() instead. +type FeaturesetVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFeaturesetVersionsClient creates a new instance of FeaturesetVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFeaturesetVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FeaturesetVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".FeaturesetVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FeaturesetVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginBackfill - Backfill. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Feature set version backfill request entity. +// - options - FeaturesetVersionsClientBeginBackfillOptions contains the optional parameters for the FeaturesetVersionsClient.BeginBackfill +// method. 
+func (client *FeaturesetVersionsClient) BeginBackfill(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturesetVersionBackfillRequest, options *FeaturesetVersionsClientBeginBackfillOptions) (*runtime.Poller[FeaturesetVersionsClientBackfillResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.backfill(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturesetVersionsClientBackfillResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturesetVersionsClientBackfillResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Backfill - Backfill. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *FeaturesetVersionsClient) backfill(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturesetVersionBackfillRequest, options *FeaturesetVersionsClientBeginBackfillOptions) (*http.Response, error) { + req, err := client.backfillCreateRequest(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// backfillCreateRequest creates the Backfill request. +func (client *FeaturesetVersionsClient) backfillCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturesetVersionBackfillRequest, options *FeaturesetVersionsClientBeginBackfillOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginCreateOrUpdate - Create or update version. 
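// Usage sketch (editorial addition, not part of the generated diff): triggering a materialization
// backfill for a featureset version. FeaturesetVersionBackfillRequest is left empty here because its
// fields are defined elsewhere in the module; populate the feature window and materialization settings
// for a real run. Import path and resource names are assumptions.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesetVersionsClient_BeginBackfill_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewFeaturesetVersionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	poller, err := client.BeginBackfill(ctx, "test-rg", "my-aml-workspace", "my-featureset", "1",
		armmachinelearning.FeaturesetVersionBackfillRequest{ /* set the backfill window and settings here */ }, nil)
	if err != nil {
		log.Fatalf("failed to start backfill: %v", err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("backfill did not complete: %v", err)
	}
	_ = res
}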
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Version entity to create or update. +// - options - FeaturesetVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturesetVersionsClient.BeginCreateOrUpdate +// method. +func (client *FeaturesetVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturesetVersion, options *FeaturesetVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturesetVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturesetVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturesetVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *FeaturesetVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturesetVersion, options *FeaturesetVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *FeaturesetVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturesetVersion, options *FeaturesetVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - FeaturesetVersionsClientBeginDeleteOptions contains the optional parameters for the FeaturesetVersionsClient.BeginDelete +// method. +func (client *FeaturesetVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientBeginDeleteOptions) (*runtime.Poller[FeaturesetVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, workspaceName, name, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturesetVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturesetVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
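// Usage sketch (editorial addition, not part of the generated diff): creating a featureset version as a
// long-running operation. FeaturesetVersionProperties and its Description field are assumed from the
// common asset-version shape; only the BeginCreateOrUpdate signature comes from this file, and the v4
// import path is assumed.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesetVersionsClient_BeginCreateOrUpdate_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewFeaturesetVersionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	poller, err := client.BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "my-featureset", "1",
		armmachinelearning.FeaturesetVersion{
			Properties: &armmachinelearning.FeaturesetVersionProperties{ // assumed type and field names
				Description: to.Ptr("first version of the transactions featureset"),
			},
		}, nil)
	if err != nil {
		log.Fatalf("failed to start create: %v", err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("create did not complete: %v", err)
	}
	_ = res
}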
+// +// Generated from API version 2023-06-01-preview +func (client *FeaturesetVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, name, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *FeaturesetVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - FeaturesetVersionsClientGetOptions contains the optional parameters for the FeaturesetVersionsClient.Get method. 
+func (client *FeaturesetVersionsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientGetOptions) (FeaturesetVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, workspaceName, name, version, options) + if err != nil { + return FeaturesetVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesetVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesetVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *FeaturesetVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *FeaturesetVersionsClient) getHandleResponse(resp *http.Response) (FeaturesetVersionsClientGetResponse, error) { + result := FeaturesetVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturesetVersion); err != nil { + return FeaturesetVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List versions. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Featureset name. This is case-sensitive. +// - options - FeaturesetVersionsClientListOptions contains the optional parameters for the FeaturesetVersionsClient.NewListPager +// method. 
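// Usage sketch (editorial addition, not part of the generated diff): fetching a specific featureset
// version. The v4 module import path and resource names are placeholders.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesetVersionsClient_Get_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	client, err := armmachinelearning.NewFeaturesetVersionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Get(ctx, resourceGroupName, workspaceName, name, version, options)
	res, err := client.Get(context.Background(), "test-rg", "my-aml-workspace", "my-featureset", "1", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	_ = res.FeaturesetVersion // the response embeds the FeaturesetVersion resource
}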
+func (client *FeaturesetVersionsClient) NewListPager(resourceGroupName string, workspaceName string, name string, options *FeaturesetVersionsClientListOptions) *runtime.Pager[FeaturesetVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[FeaturesetVersionsClientListResponse]{ + More: func(page FeaturesetVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FeaturesetVersionsClientListResponse) (FeaturesetVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FeaturesetVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesetVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesetVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *FeaturesetVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturesetVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Tags != nil { + reqQP.Set("tags", *options.Tags) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + if options != nil && options.PageSize != nil { + reqQP.Set("pageSize", strconv.FormatInt(int64(*options.PageSize), 10)) + } + if options != nil && options.VersionName != nil { + reqQP.Set("versionName", *options.VersionName) + } + if options != nil && options.Version != nil { + reqQP.Set("version", *options.Version) + } + if options != nil && options.Description != nil { + reqQP.Set("description", *options.Description) + } + if options != nil && options.CreatedBy != nil { + reqQP.Set("createdBy", *options.CreatedBy) + } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } + req.Raw().URL.RawQuery = 
reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *FeaturesetVersionsClient) listHandleResponse(resp *http.Response) (FeaturesetVersionsClientListResponse, error) { + result := FeaturesetVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturesetVersionResourceArmPaginatedResult); err != nil { + return FeaturesetVersionsClientListResponse{}, err + } + return result, nil +} + +// NewListMaterializationJobsPager - List materialization Jobs. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - FeaturesetVersionsClientListMaterializationJobsOptions contains the optional parameters for the FeaturesetVersionsClient.NewListMaterializationJobsPager +// method. +func (client *FeaturesetVersionsClient) NewListMaterializationJobsPager(resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientListMaterializationJobsOptions) *runtime.Pager[FeaturesetVersionsClientListMaterializationJobsResponse] { + return runtime.NewPager(runtime.PagingHandler[FeaturesetVersionsClientListMaterializationJobsResponse]{ + More: func(page FeaturesetVersionsClientListMaterializationJobsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FeaturesetVersionsClientListMaterializationJobsResponse) (FeaturesetVersionsClientListMaterializationJobsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listMaterializationJobsCreateRequest(ctx, resourceGroupName, workspaceName, name, version, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FeaturesetVersionsClientListMaterializationJobsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturesetVersionsClientListMaterializationJobsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturesetVersionsClientListMaterializationJobsResponse{}, runtime.NewResponseError(resp) + } + return client.listMaterializationJobsHandleResponse(resp) + }, + }) +} + +// listMaterializationJobsCreateRequest creates the ListMaterializationJobs request. 
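// Usage sketch (editorial addition, not part of the generated diff): paging through materialization jobs
// for a featureset version. The featureWindowStart/featureWindowEnd values are passed through as raw
// query strings, so the timestamp format shown is an assumption; adjust to what the service expects.
package armmachinelearning_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func ExampleFeaturesetVersionsClient_NewListMaterializationJobsPager_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewFeaturesetVersionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	pager := client.NewListMaterializationJobsPager("test-rg", "my-aml-workspace", "my-featureset", "1",
		&armmachinelearning.FeaturesetVersionsClientListMaterializationJobsOptions{
			FeatureWindowStart: to.Ptr("2023-06-01T00:00:00Z"),
			FeatureWindowEnd:   to.Ptr("2023-06-07T00:00:00Z"),
		})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, job := range page.Value {
			_ = job
		}
	}
}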
+func (client *FeaturesetVersionsClient) listMaterializationJobsCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturesetVersionsClientListMaterializationJobsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/listMaterializationJobs" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Filters != nil { + reqQP.Set("filters", *options.Filters) + } + if options != nil && options.FeatureWindowStart != nil { + reqQP.Set("featureWindowStart", *options.FeatureWindowStart) + } + if options != nil && options.FeatureWindowEnd != nil { + reqQP.Set("featureWindowEnd", *options.FeatureWindowEnd) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listMaterializationJobsHandleResponse handles the ListMaterializationJobs response. +func (client *FeaturesetVersionsClient) listMaterializationJobsHandleResponse(resp *http.Response) (FeaturesetVersionsClientListMaterializationJobsResponse, error) { + result := FeaturesetVersionsClientListMaterializationJobsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturesetJobArmPaginatedResult); err != nil { + return FeaturesetVersionsClientListMaterializationJobsResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/featurestoreentitycontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/featurestoreentitycontainers_client.go new file mode 100644 index 000000000000..48d6a07292d9 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/featurestoreentitycontainers_client.go @@ -0,0 +1,345 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// FeaturestoreEntityContainersClient contains the methods for the FeaturestoreEntityContainers group. +// Don't use this type directly, use NewFeaturestoreEntityContainersClient() instead. +type FeaturestoreEntityContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFeaturestoreEntityContainersClient creates a new instance of FeaturestoreEntityContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFeaturestoreEntityContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FeaturestoreEntityContainersClient, error) { + cl, err := arm.NewClient(moduleName+".FeaturestoreEntityContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FeaturestoreEntityContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - body - Container entity to create or update. +// - options - FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturestoreEntityContainersClient.BeginCreateOrUpdate +// method. +func (client *FeaturestoreEntityContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, body FeaturestoreEntityContainer, options *FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturestoreEntityContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, name, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturestoreEntityContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturestoreEntityContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *FeaturestoreEntityContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, body FeaturestoreEntityContainer, options *FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, name, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *FeaturestoreEntityContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, body FeaturestoreEntityContainer, options *FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - options - FeaturestoreEntityContainersClientBeginDeleteOptions contains the optional parameters for the FeaturestoreEntityContainersClient.BeginDelete +// method. 
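// Illustrative usage sketch (editorial addition, not generated code): starting a delete of a
// featurestore entity container and waiting for the long-running operation to finish.
// Assumes "client" is a *FeaturestoreEntityContainersClient built via
// NewFeaturestoreEntityContainersClient; the resource group, workspace, and container names
// below are placeholders.
//
//	poller, err := client.BeginDelete(context.TODO(), "example-rg", "example-workspace", "example-container", nil)
//	if err != nil {
//		log.Fatalf("failed to start delete: %v", err)
//	}
//	if _, err = poller.PollUntilDone(context.TODO(), nil); err != nil {
//		log.Fatalf("failed to complete delete: %v", err)
//	}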
+func (client *FeaturestoreEntityContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityContainersClientBeginDeleteOptions) (*runtime.Poller[FeaturestoreEntityContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, workspaceName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturestoreEntityContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturestoreEntityContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *FeaturestoreEntityContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *FeaturestoreEntityContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// GetEntity - Get container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. 
This is case-sensitive. +// - options - FeaturestoreEntityContainersClientGetEntityOptions contains the optional parameters for the FeaturestoreEntityContainersClient.GetEntity +// method. +func (client *FeaturestoreEntityContainersClient) GetEntity(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityContainersClientGetEntityOptions) (FeaturestoreEntityContainersClientGetEntityResponse, error) { + req, err := client.getEntityCreateRequest(ctx, resourceGroupName, workspaceName, name, options) + if err != nil { + return FeaturestoreEntityContainersClientGetEntityResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturestoreEntityContainersClientGetEntityResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturestoreEntityContainersClientGetEntityResponse{}, runtime.NewResponseError(resp) + } + return client.getEntityHandleResponse(resp) +} + +// getEntityCreateRequest creates the GetEntity request. +func (client *FeaturestoreEntityContainersClient) getEntityCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityContainersClientGetEntityOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getEntityHandleResponse handles the GetEntity response. +func (client *FeaturestoreEntityContainersClient) getEntityHandleResponse(resp *http.Response) (FeaturestoreEntityContainersClientGetEntityResponse, error) { + result := FeaturestoreEntityContainersClientGetEntityResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturestoreEntityContainer); err != nil { + return FeaturestoreEntityContainersClientGetEntityResponse{}, err + } + return result, nil +} + +// NewListPager - List featurestore entity containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - options - FeaturestoreEntityContainersClientListOptions contains the optional parameters for the FeaturestoreEntityContainersClient.NewListPager +// method. 
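// Illustrative usage sketch (editorial addition, not generated code): paging through
// featurestore entity containers with NewListPager. Assumes "client" is a
// *FeaturestoreEntityContainersClient; "example-rg" and "example-workspace" are placeholder
// names.
//
//	pager := client.NewListPager("example-rg", "example-workspace", nil)
//	for pager.More() {
//		page, err := pager.NextPage(context.TODO())
//		if err != nil {
//			log.Fatalf("failed to advance page: %v", err)
//		}
//		for _, container := range page.Value {
//			_ = container // each entry is a *FeaturestoreEntityContainer
//		}
//	}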
+func (client *FeaturestoreEntityContainersClient) NewListPager(resourceGroupName string, workspaceName string, options *FeaturestoreEntityContainersClientListOptions) *runtime.Pager[FeaturestoreEntityContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[FeaturestoreEntityContainersClientListResponse]{ + More: func(page FeaturestoreEntityContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FeaturestoreEntityContainersClientListResponse) (FeaturestoreEntityContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FeaturestoreEntityContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturestoreEntityContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturestoreEntityContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *FeaturestoreEntityContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *FeaturestoreEntityContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Tags != nil { + reqQP.Set("tags", *options.Tags) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + if options != nil && options.PageSize != nil { + reqQP.Set("pageSize", strconv.FormatInt(int64(*options.PageSize), 10)) + } + if options != nil && options.Name != nil { + reqQP.Set("name", *options.Name) + } + if options != nil && options.Description != nil { + reqQP.Set("description", *options.Description) + } + if options != nil && options.CreatedBy != nil { + reqQP.Set("createdBy", *options.CreatedBy) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *FeaturestoreEntityContainersClient) listHandleResponse(resp *http.Response) (FeaturestoreEntityContainersClientListResponse, error) { + result := FeaturestoreEntityContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturestoreEntityContainerResourceArmPaginatedResult); err != nil { + return FeaturestoreEntityContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/featurestoreentityversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/featurestoreentityversions_client.go new file mode 100644 index 000000000000..bcbbe291b3e9 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/featurestoreentityversions_client.go @@ -0,0 +1,371 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// FeaturestoreEntityVersionsClient contains the methods for the FeaturestoreEntityVersions group. +// Don't use this type directly, use NewFeaturestoreEntityVersionsClient() instead. +type FeaturestoreEntityVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFeaturestoreEntityVersionsClient creates a new instance of FeaturestoreEntityVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFeaturestoreEntityVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FeaturestoreEntityVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".FeaturestoreEntityVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FeaturestoreEntityVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Version entity to create or update. +// - options - FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.BeginCreateOrUpdate +// method. 
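// Illustrative usage sketch (editorial addition, not generated code): creating or updating a
// featurestore entity version and blocking until the operation completes. Assumes "client" is a
// *FeaturestoreEntityVersionsClient; the names and the empty FeaturestoreEntityVersion body are
// placeholders (a real call would populate the version's Properties).
//
//	body := armmachinelearning.FeaturestoreEntityVersion{}
//	poller, err := client.BeginCreateOrUpdate(context.TODO(), "example-rg", "example-workspace", "example-entity", "1", body, nil)
//	if err != nil {
//		log.Fatalf("failed to start create or update: %v", err)
//	}
//	resp, err := poller.PollUntilDone(context.TODO(), nil)
//	if err != nil {
//		log.Fatalf("failed to complete create or update: %v", err)
//	}
//	_ = resp // final response payload returned once provisioning succeeds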
+func (client *FeaturestoreEntityVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturestoreEntityVersion, options *FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[FeaturestoreEntityVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturestoreEntityVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturestoreEntityVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *FeaturestoreEntityVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturestoreEntityVersion, options *FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *FeaturestoreEntityVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body FeaturestoreEntityVersion, options *FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - FeaturestoreEntityVersionsClientBeginDeleteOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.BeginDelete +// method. +func (client *FeaturestoreEntityVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturestoreEntityVersionsClientBeginDeleteOptions) (*runtime.Poller[FeaturestoreEntityVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, workspaceName, name, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FeaturestoreEntityVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[FeaturestoreEntityVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *FeaturestoreEntityVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturestoreEntityVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, name, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *FeaturestoreEntityVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturestoreEntityVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - FeaturestoreEntityVersionsClientGetOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.Get +// method. 
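// Illustrative usage sketch (editorial addition, not generated code): reading back a single
// featurestore entity version with Get. Assumes "client" is a *FeaturestoreEntityVersionsClient;
// the name and version values are placeholders.
//
//	resp, err := client.Get(context.TODO(), "example-rg", "example-workspace", "example-entity", "1", nil)
//	if err != nil {
//		log.Fatalf("failed to get version: %v", err)
//	}
//	_ = resp.FeaturestoreEntityVersion // the retrieved resource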
+func (client *FeaturestoreEntityVersionsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturestoreEntityVersionsClientGetOptions) (FeaturestoreEntityVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, workspaceName, name, version, options) + if err != nil { + return FeaturestoreEntityVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturestoreEntityVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturestoreEntityVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *FeaturestoreEntityVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, options *FeaturestoreEntityVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *FeaturestoreEntityVersionsClient) getHandleResponse(resp *http.Response) (FeaturestoreEntityVersionsClientGetResponse, error) { + result := FeaturestoreEntityVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturestoreEntityVersion); err != nil { + return FeaturestoreEntityVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List versions. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Feature entity name. This is case-sensitive. +// - options - FeaturestoreEntityVersionsClientListOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.NewListPager +// method. 
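// Illustrative usage sketch (editorial addition, not generated code): listing the versions of a
// featurestore entity, optionally narrowing the results with the generated options struct.
// Assumes "client" is a *FeaturestoreEntityVersionsClient; the Tags filter string is a
// placeholder value.
//
//	opts := &armmachinelearning.FeaturestoreEntityVersionsClientListOptions{
//		Tags: to.Ptr("team=featurestore"),
//	}
//	pager := client.NewListPager("example-rg", "example-workspace", "example-entity", opts)
//	for pager.More() {
//		page, err := pager.NextPage(context.TODO())
//		if err != nil {
//			log.Fatalf("failed to advance page: %v", err)
//		}
//		for _, v := range page.Value {
//			_ = v // each entry is a *FeaturestoreEntityVersion
//		}
//	}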
+func (client *FeaturestoreEntityVersionsClient) NewListPager(resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityVersionsClientListOptions) *runtime.Pager[FeaturestoreEntityVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[FeaturestoreEntityVersionsClientListResponse]{ + More: func(page FeaturestoreEntityVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FeaturestoreEntityVersionsClientListResponse) (FeaturestoreEntityVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FeaturestoreEntityVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FeaturestoreEntityVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FeaturestoreEntityVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *FeaturestoreEntityVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *FeaturestoreEntityVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Tags != nil { + reqQP.Set("tags", *options.Tags) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + if options != nil && options.PageSize != nil { + reqQP.Set("pageSize", strconv.FormatInt(int64(*options.PageSize), 10)) + } + if options != nil && options.VersionName != nil { + reqQP.Set("versionName", *options.VersionName) + } + if options != nil && options.Version != nil { + reqQP.Set("version", *options.Version) + } + if options != nil && options.Description != nil { + reqQP.Set("description", *options.Description) + } + if options != nil && options.CreatedBy != nil { + reqQP.Set("createdBy", *options.CreatedBy) + } + if options != 
nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *FeaturestoreEntityVersionsClient) listHandleResponse(resp *http.Response) (FeaturestoreEntityVersionsClientListResponse, error) { + result := FeaturestoreEntityVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FeaturestoreEntityVersionResourceArmPaginatedResult); err != nil { + return FeaturestoreEntityVersionsClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/go.mod b/sdk/resourcemanager/machinelearning/armmachinelearning/go.mod index 5503d012775a..86f1a11a4c38 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/go.mod +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/go.mod @@ -1,21 +1,13 @@ -module github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3 +module github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4 go 1.18 -require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 -) +require github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - golang.org/x/crypto v0.6.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/go.sum b/sdk/resourcemanager/machinelearning/armmachinelearning/go.sum index 8ba445a8c4da..b6bd7eaad1ba 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/go.sum +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/go.sum @@ -1,31 +1,15 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client.go index a5efdcbca484..a0c520677a82 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "net/http" "net/url" + "strconv" "strings" ) @@ -47,7 +48,7 @@ func NewJobsClient(subscriptionID string, credential azcore.TokenCredential, opt // BeginCancel - Cancels a Job (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - id - The name and identifier for the Job. This is case-sensitive. @@ -69,7 +70,7 @@ func (client *JobsClient) BeginCancel(ctx context.Context, resourceGroupName str // Cancel - Cancels a Job (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *JobsClient) cancel(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *JobsClientBeginCancelOptions) (*http.Response, error) { req, err := client.cancelCreateRequest(ctx, resourceGroupName, workspaceName, id, options) if err != nil { @@ -109,7 +110,7 @@ func (client *JobsClient) cancelCreateRequest(ctx context.Context, resourceGroup return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -118,7 +119,7 @@ func (client *JobsClient) cancelCreateRequest(ctx context.Context, resourceGroup // CreateOrUpdate - Creates and executes a Job. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - id - The name and identifier for the Job. This is case-sensitive. @@ -163,7 +164,7 @@ func (client *JobsClient) createOrUpdateCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -181,7 +182,7 @@ func (client *JobsClient) createOrUpdateHandleResponse(resp *http.Response) (Job // BeginDelete - Deletes a Job (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - id - The name and identifier for the Job. This is case-sensitive. @@ -192,7 +193,9 @@ func (client *JobsClient) BeginDelete(ctx context.Context, resourceGroupName str if err != nil { return nil, err } - return runtime.NewPoller[JobsClientDeleteResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[JobsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[JobsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -201,7 +204,7 @@ func (client *JobsClient) BeginDelete(ctx context.Context, resourceGroupName str // Delete - Deletes a Job (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *JobsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *JobsClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, id, options) if err != nil { @@ -241,7 +244,7 @@ func (client *JobsClient) deleteCreateRequest(ctx context.Context, resourceGroup return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -250,7 +253,7 @@ func (client *JobsClient) deleteCreateRequest(ctx context.Context, resourceGroup // Get - Gets a Job by name/id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - id - The name and identifier for the Job. This is case-sensitive. @@ -294,7 +297,7 @@ func (client *JobsClient) getCreateRequest(ctx context.Context, resourceGroupNam return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -311,7 +314,7 @@ func (client *JobsClient) getHandleResponse(resp *http.Response) (JobsClientGetR // NewListPager - Lists Jobs in the workspace. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - JobsClientListOptions contains the optional parameters for the JobsClient.NewListPager method. @@ -363,7 +366,7 @@ func (client *JobsClient) listCreateRequest(ctx context.Context, resourceGroupNa return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } @@ -376,6 +379,15 @@ func (client *JobsClient) listCreateRequest(ctx context.Context, resourceGroupNa if options != nil && options.ListViewType != nil { reqQP.Set("listViewType", string(*options.ListViewType)) } + if options != nil && options.AssetName != nil { + reqQP.Set("assetName", *options.AssetName) + } + if options != nil && options.Scheduled != nil { + reqQP.Set("scheduled", strconv.FormatBool(*options.Scheduled)) + } + if options != nil && options.ScheduleID != nil { + reqQP.Set("scheduleId", *options.ScheduleID) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -389,3 +401,66 @@ func (client *JobsClient) listHandleResponse(resp *http.Response) (JobsClientLis } return result, nil } + +// Update - Updates a Job. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. 
+// - id - The name and identifier for the Job. This is case-sensitive. +// - body - Job definition to apply during the operation. +// - options - JobsClientUpdateOptions contains the optional parameters for the JobsClient.Update method. +func (client *JobsClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, id string, body PartialJobBasePartialResource, options *JobsClientUpdateOptions) (JobsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, id, body, options) + if err != nil { + return JobsClientUpdateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return JobsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return JobsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *JobsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, body PartialJobBasePartialResource, options *JobsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// updateHandleResponse handles the Update response. +func (client *JobsClient) updateHandleResponse(resp *http.Response) (JobsClientUpdateResponse, error) { + result := JobsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.JobBase); err != nil { + return JobsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client_example_test.go deleted file mode 100644 index 9428c4ecfeb3..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/jobs_client_example_test.go +++ /dev/null @@ -1,1500 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/AutoMLJob/list.json -func ExampleJobsClient_NewListPager_listAutoMlJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewJobsClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.JobsClientListOptions{Skip: nil, - JobType: nil, - Tag: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.JobBaseResourceArmPaginatedResult = armmachinelearning.JobBaseResourceArmPaginatedResult{ - // Value: []*armmachinelearning.JobBase{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeManagedIdentity), - // }, - // Properties: &armmachinelearning.AutoMLJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // Identity: &armmachinelearning.AmlToken{ - // IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - // }, - // IsArchived: to.Ptr(false), - // JobType: to.Ptr(armmachinelearning.JobTypeAutoML), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatus("Scheduled")), - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": 
&armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": nil, - // }, - // }, - // }, - // TaskDetails: &armmachinelearning.ImageClassification{ - // TargetColumnName: to.Ptr("string"), - // TaskType: to.Ptr(armmachinelearning.TaskTypeImageClassification), - // TrainingData: &armmachinelearning.MLTableJobInput{ - // URI: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeMltable), - // }, - // LimitSettings: &armmachinelearning.ImageLimitSettings{ - // MaxTrials: to.Ptr[int32](2), - // }, - // ModelSettings: &armmachinelearning.ImageModelSettingsClassification{ - // ValidationCropSize: to.Ptr[int32](2), - // }, - // SearchSpace: []*armmachinelearning.ImageModelDistributionSettingsClassification{ - // { - // ValidationCropSize: to.Ptr("choice(2, 360)"), - // }}, - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/CommandJob/list.json -func ExampleJobsClient_NewListPager_listCommandJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewJobsClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.JobsClientListOptions{Skip: nil, - JobType: to.Ptr("string"), - Tag: to.Ptr("string"), - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.JobBaseResourceArmPaginatedResult = armmachinelearning.JobBaseResourceArmPaginatedResult{ - // Value: []*armmachinelearning.JobBase{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CommandJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // Identity: &armmachinelearning.AmlToken{ - // IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - // }, - // JobType: to.Ptr(armmachinelearning.JobTypeCommand), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // CodeID: to.Ptr("string"), - // Command: to.Ptr("string"), - // Distribution: &armmachinelearning.TensorFlow{ - // DistributionType: to.Ptr(armmachinelearning.DistributionTypeTensorFlow), - // ParameterServerCount: to.Ptr[int32](1), - // WorkerCount: to.Ptr[int32](1), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Inputs: map[string]armmachinelearning.JobInputClassification{ - // "string": &armmachinelearning.LiteralJobInput{ - // Description: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - // Value: to.Ptr("string"), - // }, - // }, - // Limits: &armmachinelearning.CommandJobLimits{ - // JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeCommand), - // Timeout: to.Ptr("PT5M"), - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Parameters: map[string]any{ - // "string": "string", - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "7aad5998-6c83-4ca9-b50a-b44dfc43f420": nil, - // }, - // }, - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/PipelineJob/list.json -func 
ExampleJobsClient_NewListPager_listPipelineJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewJobsClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.JobsClientListOptions{Skip: nil, - JobType: to.Ptr("string"), - Tag: to.Ptr("string"), - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.JobBaseResourceArmPaginatedResult = armmachinelearning.JobBaseResourceArmPaginatedResult{ - // Value: []*armmachinelearning.JobBase{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.PipelineJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // JobType: to.Ptr(armmachinelearning.JobTypePipeline), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // Inputs: map[string]armmachinelearning.JobInputClassification{ - // "string": &armmachinelearning.LiteralJobInput{ - // Description: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - // Value: to.Ptr("string"), - // }, - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeUpload), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Settings: map[string]any{ - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/SweepJob/list.json -func ExampleJobsClient_NewListPager_listSweepJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewJobsClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.JobsClientListOptions{Skip: nil, - JobType: to.Ptr("string"), - Tag: to.Ptr("string"), - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.JobBaseResourceArmPaginatedResult = armmachinelearning.JobBaseResourceArmPaginatedResult{ - // Value: []*armmachinelearning.JobBase{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.SweepJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // JobType: to.Ptr(armmachinelearning.JobTypeSweep), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // EarlyTermination: &armmachinelearning.MedianStoppingPolicy{ - // DelayEvaluation: to.Ptr[int32](1), - // EvaluationInterval: to.Ptr[int32](1), - // PolicyType: to.Ptr(armmachinelearning.EarlyTerminationPolicyTypeMedianStopping), - // }, - // Limits: &armmachinelearning.SweepJobLimits{ - // JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeSweep), - // MaxConcurrentTrials: to.Ptr[int32](1), - // MaxTotalTrials: to.Ptr[int32](1), - // TrialTimeout: to.Ptr("PT1S"), - // }, - // Objective: &armmachinelearning.Objective{ - // Goal: to.Ptr(armmachinelearning.GoalMinimize), - // PrimaryMetric: to.Ptr("string"), - // }, - // SamplingAlgorithm: &armmachinelearning.GridSamplingAlgorithm{ - // 
SamplingAlgorithmType: to.Ptr(armmachinelearning.SamplingAlgorithmTypeGrid), - // }, - // SearchSpace: map[string]any{ - // "string":map[string]any{ - // }, - // }, - // Trial: &armmachinelearning.TrialComponent{ - // CodeID: to.Ptr("string"), - // Command: to.Ptr("string"), - // Distribution: &armmachinelearning.Mpi{ - // DistributionType: to.Ptr(armmachinelearning.DistributionTypeMpi), - // ProcessCountPerInstance: to.Ptr[int32](1), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil, - // }, - // }, - // }, - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/delete.json -func ExampleJobsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewJobsClient().BeginDelete(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/AutoMLJob/get.json -func ExampleJobsClient_Get_getAutoMlJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeManagedIdentity), - // }, - // Properties: &armmachinelearning.AutoMLJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // Identity: &armmachinelearning.AmlToken{ - // IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - // }, - // IsArchived: to.Ptr(false), - // JobType: to.Ptr(armmachinelearning.JobTypeAutoML), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatus("Scheduled")), - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": nil, - // }, - // }, - // }, - // TaskDetails: &armmachinelearning.ImageClassification{ - // TargetColumnName: to.Ptr("string"), - // TaskType: to.Ptr(armmachinelearning.TaskTypeImageClassification), - // TrainingData: &armmachinelearning.MLTableJobInput{ - // URI: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeMltable), - // }, - // LimitSettings: &armmachinelearning.ImageLimitSettings{ - // MaxTrials: to.Ptr[int32](2), - // }, - // ModelSettings: &armmachinelearning.ImageModelSettingsClassification{ - // ValidationCropSize: to.Ptr[int32](2), - // }, - // SearchSpace: []*armmachinelearning.ImageModelDistributionSettingsClassification{ - // { - // ValidationCropSize: to.Ptr("choice(2, 360)"), - // }}, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/CommandJob/get.json -func ExampleJobsClient_Get_getCommandJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to 
obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CommandJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // Identity: &armmachinelearning.AmlToken{ - // IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - // }, - // JobType: to.Ptr(armmachinelearning.JobTypeCommand), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // CodeID: to.Ptr("string"), - // Command: to.Ptr("string"), - // Distribution: &armmachinelearning.TensorFlow{ - // DistributionType: to.Ptr(armmachinelearning.DistributionTypeTensorFlow), - // ParameterServerCount: to.Ptr[int32](1), - // WorkerCount: to.Ptr[int32](1), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Inputs: map[string]armmachinelearning.JobInputClassification{ - // "string": &armmachinelearning.LiteralJobInput{ - // Description: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - // Value: to.Ptr("string"), - // }, - // }, - // Limits: &armmachinelearning.CommandJobLimits{ - // JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeCommand), - // Timeout: to.Ptr("PT5M"), - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Parameters: map[string]any{ - // "string": "string", - // }, - // 
Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "8385cf05-78c0-41ef-b31d-36796a678e19": nil, - // }, - // }, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/PipelineJob/get.json -func ExampleJobsClient_Get_getPipelineJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.PipelineJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // JobType: to.Ptr(armmachinelearning.JobTypePipeline), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // Inputs: map[string]armmachinelearning.JobInputClassification{ - // "string": &armmachinelearning.LiteralJobInput{ - // Description: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - // Value: to.Ptr("string"), - // }, - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeUpload), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Settings: map[string]any{ - // }, - // }, - // } -} - -// 
Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/SweepJob/get.json -func ExampleJobsClient_Get_getSweepJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.SweepJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // JobType: to.Ptr(armmachinelearning.JobTypeSweep), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // EarlyTermination: &armmachinelearning.MedianStoppingPolicy{ - // DelayEvaluation: to.Ptr[int32](1), - // EvaluationInterval: to.Ptr[int32](1), - // PolicyType: to.Ptr(armmachinelearning.EarlyTerminationPolicyTypeMedianStopping), - // }, - // Limits: &armmachinelearning.SweepJobLimits{ - // JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeSweep), - // MaxConcurrentTrials: to.Ptr[int32](1), - // MaxTotalTrials: to.Ptr[int32](1), - // TrialTimeout: to.Ptr("PT1S"), - // }, - // Objective: &armmachinelearning.Objective{ - // Goal: to.Ptr(armmachinelearning.GoalMinimize), - // PrimaryMetric: to.Ptr("string"), - // }, - // SamplingAlgorithm: &armmachinelearning.GridSamplingAlgorithm{ - // SamplingAlgorithmType: to.Ptr(armmachinelearning.SamplingAlgorithmTypeGrid), - // }, - // SearchSpace: map[string]any{ - // "string":map[string]any{ - // }, - // }, - // Trial: &armmachinelearning.TrialComponent{ - // CodeID: to.Ptr("string"), - // Command: to.Ptr("string"), - 
// Distribution: &armmachinelearning.Mpi{ - // DistributionType: to.Ptr(armmachinelearning.DistributionTypeMpi), - // ProcessCountPerInstance: to.Ptr[int32](1), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil, - // }, - // }, - // }, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/AutoMLJob/createOrUpdate.json -func ExampleJobsClient_CreateOrUpdate_createOrUpdateAutoMlJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.JobBase{ - Properties: &armmachinelearning.AutoMLJob{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - ComputeID: to.Ptr("string"), - DisplayName: to.Ptr("string"), - ExperimentName: to.Ptr("string"), - Identity: &armmachinelearning.AmlToken{ - IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - }, - IsArchived: to.Ptr(false), - JobType: to.Ptr(armmachinelearning.JobTypeAutoML), - Services: map[string]*armmachinelearning.JobService{ - "string": { - Endpoint: to.Ptr("string"), - JobServiceType: to.Ptr("string"), - Port: to.Ptr[int32](1), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - }, - }, - EnvironmentID: to.Ptr("string"), - EnvironmentVariables: map[string]*string{ - "string": to.Ptr("string"), - }, - Outputs: map[string]armmachinelearning.JobOutputClassification{ - "string": &armmachinelearning.URIFileJobOutput{ - Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - URI: to.Ptr("string"), - Description: to.Ptr("string"), - JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - }, - }, - Resources: &armmachinelearning.JobResourceConfiguration{ - InstanceCount: to.Ptr[int32](1), - InstanceType: to.Ptr("string"), - Properties: map[string]any{ - "string": map[string]any{ - "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": nil, - }, - }, - }, - TaskDetails: &armmachinelearning.ImageClassification{ - TargetColumnName: to.Ptr("string"), - TaskType: to.Ptr(armmachinelearning.TaskTypeImageClassification), - TrainingData: &armmachinelearning.MLTableJobInput{ - URI: to.Ptr("string"), - JobInputType: to.Ptr(armmachinelearning.JobInputTypeMltable), - }, - LimitSettings: &armmachinelearning.ImageLimitSettings{ - MaxTrials: to.Ptr[int32](2), - }, - ModelSettings: &armmachinelearning.ImageModelSettingsClassification{ - ValidationCropSize: to.Ptr[int32](2), - }, - SearchSpace: []*armmachinelearning.ImageModelDistributionSettingsClassification{ - { - ValidationCropSize: to.Ptr("choice(2, 360)"), - }}, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish 
the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeManagedIdentity), - // }, - // Properties: &armmachinelearning.AutoMLJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // Identity: &armmachinelearning.AmlToken{ - // IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - // }, - // IsArchived: to.Ptr(false), - // JobType: to.Ptr(armmachinelearning.JobTypeAutoML), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatus("Scheduled")), - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": nil, - // }, - // }, - // }, - // TaskDetails: &armmachinelearning.ImageClassification{ - // TargetColumnName: to.Ptr("string"), - // TaskType: to.Ptr(armmachinelearning.TaskTypeImageClassification), - // TrainingData: &armmachinelearning.MLTableJobInput{ - // URI: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeMltable), - // }, - // LimitSettings: &armmachinelearning.ImageLimitSettings{ - // MaxTrials: to.Ptr[int32](2), - // }, - // ModelSettings: &armmachinelearning.ImageModelSettingsClassification{ - // ValidationCropSize: to.Ptr[int32](2), - // }, - // SearchSpace: []*armmachinelearning.ImageModelDistributionSettingsClassification{ - // { - // ValidationCropSize: to.Ptr("choice(2, 360)"), - // }}, - // }, - // }, - // } -} - -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/CommandJob/createOrUpdate.json -func ExampleJobsClient_CreateOrUpdate_createOrUpdateCommandJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.JobBase{ - Properties: &armmachinelearning.CommandJob{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - ComputeID: to.Ptr("string"), - DisplayName: to.Ptr("string"), - ExperimentName: to.Ptr("string"), - Identity: &armmachinelearning.AmlToken{ - IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - }, - JobType: to.Ptr(armmachinelearning.JobTypeCommand), - Services: map[string]*armmachinelearning.JobService{ - "string": { - Endpoint: to.Ptr("string"), - JobServiceType: to.Ptr("string"), - Port: to.Ptr[int32](1), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - }, - }, - CodeID: to.Ptr("string"), - Command: to.Ptr("string"), - Distribution: &armmachinelearning.TensorFlow{ - DistributionType: to.Ptr(armmachinelearning.DistributionTypeTensorFlow), - ParameterServerCount: to.Ptr[int32](1), - WorkerCount: to.Ptr[int32](1), - }, - EnvironmentID: to.Ptr("string"), - EnvironmentVariables: map[string]*string{ - "string": to.Ptr("string"), - }, - Inputs: map[string]armmachinelearning.JobInputClassification{ - "string": &armmachinelearning.LiteralJobInput{ - Description: to.Ptr("string"), - JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - Value: to.Ptr("string"), - }, - }, - Limits: &armmachinelearning.CommandJobLimits{ - JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeCommand), - Timeout: to.Ptr("PT5M"), - }, - Outputs: map[string]armmachinelearning.JobOutputClassification{ - "string": &armmachinelearning.URIFileJobOutput{ - Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - URI: to.Ptr("string"), - Description: to.Ptr("string"), - JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - }, - }, - Resources: &armmachinelearning.JobResourceConfiguration{ - InstanceCount: to.Ptr[int32](1), - InstanceType: to.Ptr("string"), - Properties: map[string]any{ - "string": map[string]any{ - "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil, - }, - }, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.CommandJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // Identity: &armmachinelearning.AmlToken{ - // IdentityType: to.Ptr(armmachinelearning.IdentityConfigurationTypeAMLToken), - // }, - // JobType: to.Ptr(armmachinelearning.JobTypeCommand), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // CodeID: to.Ptr("string"), - // Command: to.Ptr("string"), - // Distribution: &armmachinelearning.TensorFlow{ - // DistributionType: to.Ptr(armmachinelearning.DistributionTypeTensorFlow), - // ParameterServerCount: to.Ptr[int32](1), - // WorkerCount: to.Ptr[int32](1), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Inputs: map[string]armmachinelearning.JobInputClassification{ - // "string": &armmachinelearning.LiteralJobInput{ - // Description: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - // Value: to.Ptr("string"), - // }, - // }, - // Limits: &armmachinelearning.CommandJobLimits{ - // JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeCommand), - // Timeout: to.Ptr("PT5M"), - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeReadWriteMount), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Parameters: map[string]any{ - // "string": "string", - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "a0847709-f5aa-4561-8ba5-d915d403fdcf": nil, - // }, - // }, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/PipelineJob/createOrUpdate.json -func ExampleJobsClient_CreateOrUpdate_createOrUpdatePipelineJob() { - cred, err := 
azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.JobBase{ - Properties: &armmachinelearning.PipelineJob{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - ComputeID: to.Ptr("string"), - DisplayName: to.Ptr("string"), - ExperimentName: to.Ptr("string"), - JobType: to.Ptr(armmachinelearning.JobTypePipeline), - Services: map[string]*armmachinelearning.JobService{ - "string": { - Endpoint: to.Ptr("string"), - JobServiceType: to.Ptr("string"), - Port: to.Ptr[int32](1), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - }, - }, - Inputs: map[string]armmachinelearning.JobInputClassification{ - "string": &armmachinelearning.LiteralJobInput{ - Description: to.Ptr("string"), - JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - Value: to.Ptr("string"), - }, - }, - Outputs: map[string]armmachinelearning.JobOutputClassification{ - "string": &armmachinelearning.URIFileJobOutput{ - Mode: to.Ptr(armmachinelearning.OutputDeliveryModeUpload), - URI: to.Ptr("string"), - Description: to.Ptr("string"), - JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - }, - }, - Settings: map[string]any{}, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.PipelineJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // JobType: to.Ptr(armmachinelearning.JobTypePipeline), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // Inputs: map[string]armmachinelearning.JobInputClassification{ - // "string": &armmachinelearning.LiteralJobInput{ - // Description: to.Ptr("string"), - // JobInputType: to.Ptr(armmachinelearning.JobInputTypeLiteral), - // Value: to.Ptr("string"), - // }, - // }, - // Outputs: map[string]armmachinelearning.JobOutputClassification{ - // "string": &armmachinelearning.URIFileJobOutput{ - // Mode: to.Ptr(armmachinelearning.OutputDeliveryModeUpload), - // URI: to.Ptr("string"), - // Description: to.Ptr("string"), - // JobOutputType: to.Ptr(armmachinelearning.JobOutputTypeURIFile), - // }, - // }, - // Settings: map[string]any{ - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/SweepJob/createOrUpdate.json -func ExampleJobsClient_CreateOrUpdate_createOrUpdateSweepJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewJobsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.JobBase{ - Properties: &armmachinelearning.SweepJob{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - ComputeID: to.Ptr("string"), - DisplayName: to.Ptr("string"), - ExperimentName: to.Ptr("string"), - JobType: to.Ptr(armmachinelearning.JobTypeSweep), - Services: map[string]*armmachinelearning.JobService{ - "string": { - Endpoint: to.Ptr("string"), - JobServiceType: to.Ptr("string"), - Port: to.Ptr[int32](1), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - }, - }, - EarlyTermination: 
&armmachinelearning.MedianStoppingPolicy{ - DelayEvaluation: to.Ptr[int32](1), - EvaluationInterval: to.Ptr[int32](1), - PolicyType: to.Ptr(armmachinelearning.EarlyTerminationPolicyTypeMedianStopping), - }, - Limits: &armmachinelearning.SweepJobLimits{ - JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeSweep), - MaxConcurrentTrials: to.Ptr[int32](1), - MaxTotalTrials: to.Ptr[int32](1), - TrialTimeout: to.Ptr("PT1S"), - }, - Objective: &armmachinelearning.Objective{ - Goal: to.Ptr(armmachinelearning.GoalMinimize), - PrimaryMetric: to.Ptr("string"), - }, - SamplingAlgorithm: &armmachinelearning.GridSamplingAlgorithm{ - SamplingAlgorithmType: to.Ptr(armmachinelearning.SamplingAlgorithmTypeGrid), - }, - SearchSpace: map[string]any{ - "string": map[string]any{}, - }, - Trial: &armmachinelearning.TrialComponent{ - CodeID: to.Ptr("string"), - Command: to.Ptr("string"), - Distribution: &armmachinelearning.Mpi{ - DistributionType: to.Ptr(armmachinelearning.DistributionTypeMpi), - ProcessCountPerInstance: to.Ptr[int32](1), - }, - EnvironmentID: to.Ptr("string"), - EnvironmentVariables: map[string]*string{ - "string": to.Ptr("string"), - }, - Resources: &armmachinelearning.JobResourceConfiguration{ - InstanceCount: to.Ptr[int32](1), - InstanceType: to.Ptr("string"), - Properties: map[string]any{ - "string": map[string]any{ - "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil, - }, - }, - }, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.JobBase = armmachinelearning.JobBase{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.SweepJob{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ComputeID: to.Ptr("string"), - // DisplayName: to.Ptr("string"), - // ExperimentName: to.Ptr("string"), - // JobType: to.Ptr(armmachinelearning.JobTypeSweep), - // Services: map[string]*armmachinelearning.JobService{ - // "string": &armmachinelearning.JobService{ - // Endpoint: to.Ptr("string"), - // ErrorMessage: to.Ptr("string"), - // JobServiceType: to.Ptr("string"), - // Port: to.Ptr[int32](1), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Status: to.Ptr("string"), - // }, - // }, - // Status: to.Ptr(armmachinelearning.JobStatusNotStarted), - // EarlyTermination: &armmachinelearning.MedianStoppingPolicy{ - // DelayEvaluation: to.Ptr[int32](1), - // EvaluationInterval: to.Ptr[int32](1), - // PolicyType: to.Ptr(armmachinelearning.EarlyTerminationPolicyTypeMedianStopping), - // }, - // Limits: &armmachinelearning.SweepJobLimits{ - // JobLimitsType: to.Ptr(armmachinelearning.JobLimitsTypeSweep), - // MaxConcurrentTrials: to.Ptr[int32](1), - // MaxTotalTrials: to.Ptr[int32](1), - // TrialTimeout: to.Ptr("PT1S"), - // }, - // Objective: &armmachinelearning.Objective{ - // Goal: to.Ptr(armmachinelearning.GoalMinimize), - // PrimaryMetric: to.Ptr("string"), - // }, - // SamplingAlgorithm: &armmachinelearning.GridSamplingAlgorithm{ - // SamplingAlgorithmType: to.Ptr(armmachinelearning.SamplingAlgorithmTypeGrid), - // }, - // SearchSpace: map[string]any{ - // "string":map[string]any{ - // }, - // }, - // Trial: &armmachinelearning.TrialComponent{ - // CodeID: to.Ptr("string"), - // Command: to.Ptr("string"), - // Distribution: &armmachinelearning.Mpi{ - // DistributionType: to.Ptr(armmachinelearning.DistributionTypeMpi), - // ProcessCountPerInstance: to.Ptr[int32](1), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Resources: &armmachinelearning.JobResourceConfiguration{ - // InstanceCount: to.Ptr[int32](1), - // InstanceType: to.Ptr("string"), - // Properties: map[string]any{ - // "string": map[string]any{ - // "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil, - // }, - // }, - // }, - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Job/cancel.json -func ExampleJobsClient_BeginCancel() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := 
armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewJobsClient().BeginCancel(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/labelingjobs_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/labelingjobs_client.go new file mode 100644 index 000000000000..307814e8e511 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/labelingjobs_client.go @@ -0,0 +1,513 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// LabelingJobsClient contains the methods for the LabelingJobs group. +// Don't use this type directly, use NewLabelingJobsClient() instead. +type LabelingJobsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewLabelingJobsClient creates a new instance of LabelingJobsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewLabelingJobsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LabelingJobsClient, error) { + cl, err := arm.NewClient(moduleName+".LabelingJobsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &LabelingJobsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Creates or updates a labeling job (asynchronous). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - id - The name and identifier for the LabelingJob. +// - body - LabelingJob definition object. +// - options - LabelingJobsClientBeginCreateOrUpdateOptions contains the optional parameters for the LabelingJobsClient.BeginCreateOrUpdate +// method. 
+func (client *LabelingJobsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, id string, body LabelingJob, options *LabelingJobsClientBeginCreateOrUpdateOptions) (*runtime.Poller[LabelingJobsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, id, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LabelingJobsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[LabelingJobsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Creates or updates a labeling job (asynchronous). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *LabelingJobsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, id string, body LabelingJob, options *LabelingJobsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, id, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *LabelingJobsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, body LabelingJob, options *LabelingJobsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// Delete - Delete a labeling job. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. 
+// - id - The name and identifier for the LabelingJob. +// - options - LabelingJobsClientDeleteOptions contains the optional parameters for the LabelingJobsClient.Delete method. +func (client *LabelingJobsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientDeleteOptions) (LabelingJobsClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, id, options) + if err != nil { + return LabelingJobsClientDeleteResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LabelingJobsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) { + return LabelingJobsClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return LabelingJobsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *LabelingJobsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginExportLabels - Export labels from a labeling job (asynchronous). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - id - The name and identifier for the LabelingJob. +// - body - The export summary. +// - options - LabelingJobsClientBeginExportLabelsOptions contains the optional parameters for the LabelingJobsClient.BeginExportLabels +// method. 
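+//
+// An illustrative usage sketch (hand-written, not generated; cred, ctx and exportSummary, an
+// armmachinelearning.ExportSummaryClassification value, are assumed to be prepared elsewhere,
+// and the resource names are hypothetical):
+//
+//	client, err := armmachinelearning.NewLabelingJobsClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	poller, err := client.BeginExportLabels(ctx, "my-rg", "my-workspace", "my-labeling-job", exportSummary, nil)
+//	if err != nil {
+//		log.Fatalf("failed to finish the request: %v", err)
+//	}
+//	if _, err = poller.PollUntilDone(ctx, nil); err != nil {
+//		log.Fatalf("failed to poll the result: %v", err)
+//	}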
+func (client *LabelingJobsClient) BeginExportLabels(ctx context.Context, resourceGroupName string, workspaceName string, id string, body ExportSummaryClassification, options *LabelingJobsClientBeginExportLabelsOptions) (*runtime.Poller[LabelingJobsClientExportLabelsResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.exportLabels(ctx, resourceGroupName, workspaceName, id, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LabelingJobsClientExportLabelsResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[LabelingJobsClientExportLabelsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// ExportLabels - Export labels from a labeling job (asynchronous). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *LabelingJobsClient) exportLabels(ctx context.Context, resourceGroupName string, workspaceName string, id string, body ExportSummaryClassification, options *LabelingJobsClientBeginExportLabelsOptions) (*http.Response, error) { + req, err := client.exportLabelsCreateRequest(ctx, resourceGroupName, workspaceName, id, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// exportLabelsCreateRequest creates the ExportLabels request. +func (client *LabelingJobsClient) exportLabelsCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, body ExportSummaryClassification, options *LabelingJobsClientBeginExportLabelsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/exportLabels" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// Get - Gets a labeling job by name/id. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - workspaceName - Name of Azure Machine Learning workspace. +// - id - The name and identifier for the LabelingJob. +// - options - LabelingJobsClientGetOptions contains the optional parameters for the LabelingJobsClient.Get method. +func (client *LabelingJobsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientGetOptions) (LabelingJobsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, workspaceName, id, options) + if err != nil { + return LabelingJobsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LabelingJobsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LabelingJobsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *LabelingJobsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.IncludeJobInstructions != nil { + reqQP.Set("includeJobInstructions", strconv.FormatBool(*options.IncludeJobInstructions)) + } + if options != nil && options.IncludeLabelCategories != nil { + reqQP.Set("includeLabelCategories", strconv.FormatBool(*options.IncludeLabelCategories)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *LabelingJobsClient) getHandleResponse(resp *http.Response) (LabelingJobsClientGetResponse, error) { + result := LabelingJobsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LabelingJob); err != nil { + return LabelingJobsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Lists labeling jobs in the workspace. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - options - LabelingJobsClientListOptions contains the optional parameters for the LabelingJobsClient.NewListPager method. 
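+//
+// An illustrative usage sketch (hand-written, not generated; cred and ctx are assumed to be
+// prepared elsewhere, and the resource names are hypothetical):
+//
+//	client, err := armmachinelearning.NewLabelingJobsClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	pager := client.NewListPager("my-rg", "my-workspace", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			log.Fatalf("failed to advance page: %v", err)
+//		}
+//		for _, job := range page.Value {
+//			_ = job
+//		}
+//	}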
+func (client *LabelingJobsClient) NewListPager(resourceGroupName string, workspaceName string, options *LabelingJobsClientListOptions) *runtime.Pager[LabelingJobsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[LabelingJobsClientListResponse]{ + More: func(page LabelingJobsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *LabelingJobsClientListResponse) (LabelingJobsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return LabelingJobsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LabelingJobsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LabelingJobsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *LabelingJobsClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *LabelingJobsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *LabelingJobsClient) listHandleResponse(resp *http.Response) (LabelingJobsClientListResponse, error) { + result := LabelingJobsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LabelingJobResourceArmPaginatedResult); err != nil { + return LabelingJobsClientListResponse{}, err + } + return result, nil +} + +// Pause - Pause a labeling job. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - id - The name and identifier for the LabelingJob. +// - options - LabelingJobsClientPauseOptions contains the optional parameters for the LabelingJobsClient.Pause method. 
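+//
+// An illustrative usage sketch (hand-written, not generated; cred and ctx are assumed to be
+// prepared elsewhere, and the resource and job names are hypothetical):
+//
+//	client, err := armmachinelearning.NewLabelingJobsClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	if _, err = client.Pause(ctx, "my-rg", "my-workspace", "my-labeling-job", nil); err != nil {
+//		log.Fatalf("failed to finish the request: %v", err)
+//	}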
+func (client *LabelingJobsClient) Pause(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientPauseOptions) (LabelingJobsClientPauseResponse, error) { + req, err := client.pauseCreateRequest(ctx, resourceGroupName, workspaceName, id, options) + if err != nil { + return LabelingJobsClientPauseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LabelingJobsClientPauseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LabelingJobsClientPauseResponse{}, runtime.NewResponseError(resp) + } + return LabelingJobsClientPauseResponse{}, nil +} + +// pauseCreateRequest creates the Pause request. +func (client *LabelingJobsClient) pauseCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientPauseOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/pause" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginResume - Resume a labeling job (asynchronous). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - id - The name and identifier for the LabelingJob. +// - options - LabelingJobsClientBeginResumeOptions contains the optional parameters for the LabelingJobsClient.BeginResume +// method. +func (client *LabelingJobsClient) BeginResume(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientBeginResumeOptions) (*runtime.Poller[LabelingJobsClientResumeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.resume(ctx, resourceGroupName, workspaceName, id, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LabelingJobsClientResumeResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[LabelingJobsClientResumeResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Resume - Resume a labeling job (asynchronous). 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *LabelingJobsClient) resume(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientBeginResumeOptions) (*http.Response, error) { + req, err := client.resumeCreateRequest(ctx, resourceGroupName, workspaceName, id, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// resumeCreateRequest creates the Resume request. +func (client *LabelingJobsClient) resumeCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, id string, options *LabelingJobsClientBeginResumeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/resume" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if id == "" { + return nil, errors.New("parameter id cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{id}", url.PathEscape(id)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/managednetworkprovisions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/managednetworkprovisions_client.go new file mode 100644 index 000000000000..1a62d47ca202 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/managednetworkprovisions_client.go @@ -0,0 +1,115 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ManagedNetworkProvisionsClient contains the methods for the ManagedNetworkProvisions group. +// Don't use this type directly, use NewManagedNetworkProvisionsClient() instead. 
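+//
+// An illustrative usage sketch (hand-written, not generated; cred and ctx are assumed to be
+// prepared elsewhere, and the resource names are hypothetical):
+//
+//	client, err := armmachinelearning.NewManagedNetworkProvisionsClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	poller, err := client.BeginProvisionManagedNetwork(ctx, "my-rg", "my-workspace", nil)
+//	if err != nil {
+//		log.Fatalf("failed to finish the request: %v", err)
+//	}
+//	if _, err = poller.PollUntilDone(ctx, nil); err != nil {
+//		log.Fatalf("failed to poll the result: %v", err)
+//	}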
+type ManagedNetworkProvisionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewManagedNetworkProvisionsClient creates a new instance of ManagedNetworkProvisionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewManagedNetworkProvisionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagedNetworkProvisionsClient, error) { + cl, err := arm.NewClient(moduleName+".ManagedNetworkProvisionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ManagedNetworkProvisionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginProvisionManagedNetwork - Provisions the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - options - ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions contains the optional parameters for the ManagedNetworkProvisionsClient.BeginProvisionManagedNetwork +// method. +func (client *ManagedNetworkProvisionsClient) BeginProvisionManagedNetwork(ctx context.Context, resourceGroupName string, workspaceName string, options *ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions) (*runtime.Poller[ManagedNetworkProvisionsClientProvisionManagedNetworkResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.provisionManagedNetwork(ctx, resourceGroupName, workspaceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ManagedNetworkProvisionsClientProvisionManagedNetworkResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[ManagedNetworkProvisionsClientProvisionManagedNetworkResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// ProvisionManagedNetwork - Provisions the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *ManagedNetworkProvisionsClient) provisionManagedNetwork(ctx context.Context, resourceGroupName string, workspaceName string, options *ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions) (*http.Response, error) { + req, err := client.provisionManagedNetworkCreateRequest(ctx, resourceGroupName, workspaceName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// provisionManagedNetworkCreateRequest creates the ProvisionManagedNetwork request. 
+func (client *ManagedNetworkProvisionsClient) provisionManagedNetworkCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Body != nil { + return req, runtime.MarshalAsJSON(req, *options.Body) + } + return req, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/managednetworksettingsrule_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/managednetworksettingsrule_client.go new file mode 100644 index 000000000000..7dd083172288 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/managednetworksettingsrule_client.go @@ -0,0 +1,321 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ManagedNetworkSettingsRuleClient contains the methods for the ManagedNetworkSettingsRule group. +// Don't use this type directly, use NewManagedNetworkSettingsRuleClient() instead. +type ManagedNetworkSettingsRuleClient struct { + internal *arm.Client + subscriptionID string +} + +// NewManagedNetworkSettingsRuleClient creates a new instance of ManagedNetworkSettingsRuleClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
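+//
+// An illustrative construction sketch (hand-written, not generated; cred is assumed to be an
+// azidentity credential and the subscription ID is hypothetical):
+//
+//	client, err := armmachinelearning.NewManagedNetworkSettingsRuleClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	_ = client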
+func NewManagedNetworkSettingsRuleClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagedNetworkSettingsRuleClient, error) { + cl, err := arm.NewClient(moduleName+".ManagedNetworkSettingsRuleClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ManagedNetworkSettingsRuleClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Creates or updates an outbound rule in the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - ruleName - Name of the workspace managed network outbound rule +// - body - Outbound Rule to be created or updated in the managed network of a machine learning workspace. +// - options - ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.BeginCreateOrUpdate +// method. +func (client *ManagedNetworkSettingsRuleClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, body OutboundRuleBasicResource, options *ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions) (*runtime.Poller[ManagedNetworkSettingsRuleClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, ruleName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ManagedNetworkSettingsRuleClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[ManagedNetworkSettingsRuleClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Creates or updates an outbound rule in the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *ManagedNetworkSettingsRuleClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, body OutboundRuleBasicResource, options *ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, ruleName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *ManagedNetworkSettingsRuleClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, body OutboundRuleBasicResource, options *ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if ruleName == "" { + return nil, errors.New("parameter ruleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{ruleName}", url.PathEscape(ruleName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Deletes an outbound rule from the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - ruleName - Name of the workspace managed network outbound rule +// - options - ManagedNetworkSettingsRuleClientBeginDeleteOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.BeginDelete +// method. +func (client *ManagedNetworkSettingsRuleClient) BeginDelete(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, options *ManagedNetworkSettingsRuleClientBeginDeleteOptions) (*runtime.Poller[ManagedNetworkSettingsRuleClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, workspaceName, ruleName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[ManagedNetworkSettingsRuleClientDeleteResponse](resp, client.internal.Pipeline(), nil) + } else { + return runtime.NewPollerFromResumeToken[ManagedNetworkSettingsRuleClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Deletes an outbound rule from the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *ManagedNetworkSettingsRuleClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, options *ManagedNetworkSettingsRuleClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, ruleName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *ManagedNetworkSettingsRuleClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, options *ManagedNetworkSettingsRuleClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if ruleName == "" { + return nil, errors.New("parameter ruleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{ruleName}", url.PathEscape(ruleName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets an outbound rule from the managed network of a machine learning workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - ruleName - Name of the workspace managed network outbound rule +// - options - ManagedNetworkSettingsRuleClientGetOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.Get +// method. 
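+//
+// An illustrative usage sketch (hand-written, not generated; cred and ctx are assumed to be
+// prepared elsewhere, and the resource and rule names are hypothetical):
+//
+//	client, err := armmachinelearning.NewManagedNetworkSettingsRuleClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	resp, err := client.Get(ctx, "my-rg", "my-workspace", "my-outbound-rule", nil)
+//	if err != nil {
+//		log.Fatalf("failed to finish the request: %v", err)
+//	}
+//	_ = resp.OutboundRuleBasicResource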
+func (client *ManagedNetworkSettingsRuleClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, options *ManagedNetworkSettingsRuleClientGetOptions) (ManagedNetworkSettingsRuleClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, workspaceName, ruleName, options) + if err != nil { + return ManagedNetworkSettingsRuleClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ManagedNetworkSettingsRuleClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ManagedNetworkSettingsRuleClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *ManagedNetworkSettingsRuleClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, options *ManagedNetworkSettingsRuleClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if ruleName == "" { + return nil, errors.New("parameter ruleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{ruleName}", url.PathEscape(ruleName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *ManagedNetworkSettingsRuleClient) getHandleResponse(resp *http.Response) (ManagedNetworkSettingsRuleClientGetResponse, error) { + result := ManagedNetworkSettingsRuleClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OutboundRuleBasicResource); err != nil { + return ManagedNetworkSettingsRuleClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Lists the managed network outbound rules for a machine learning workspace. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - options - ManagedNetworkSettingsRuleClientListOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.NewListPager +// method. 
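+//
+// An illustrative usage sketch (hand-written, not generated; cred and ctx are assumed to be
+// prepared elsewhere, and the resource names are hypothetical):
+//
+//	client, err := armmachinelearning.NewManagedNetworkSettingsRuleClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		log.Fatalf("failed to create client: %v", err)
+//	}
+//	pager := client.NewListPager("my-rg", "my-workspace", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			log.Fatalf("failed to advance page: %v", err)
+//		}
+//		for _, rule := range page.Value {
+//			_ = rule
+//		}
+//	}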
+func (client *ManagedNetworkSettingsRuleClient) NewListPager(resourceGroupName string, workspaceName string, options *ManagedNetworkSettingsRuleClientListOptions) *runtime.Pager[ManagedNetworkSettingsRuleClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[ManagedNetworkSettingsRuleClientListResponse]{ + More: func(page ManagedNetworkSettingsRuleClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ManagedNetworkSettingsRuleClientListResponse) (ManagedNetworkSettingsRuleClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, workspaceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ManagedNetworkSettingsRuleClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ManagedNetworkSettingsRuleClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ManagedNetworkSettingsRuleClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *ManagedNetworkSettingsRuleClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *ManagedNetworkSettingsRuleClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *ManagedNetworkSettingsRuleClient) listHandleResponse(resp *http.Response) (ManagedNetworkSettingsRuleClientListResponse, error) { + result := ManagedNetworkSettingsRuleClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OutboundRuleListResult); err != nil { + return ManagedNetworkSettingsRuleClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client.go index b0815593f0aa..df328815d89c 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client.go @@ -48,7 +48,7 @@ func NewModelContainersClient(subscriptionID string, credential azcore.TokenCred // CreateOrUpdate - Create or update container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -94,7 +94,7 @@ func (client *ModelContainersClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -112,7 +112,7 @@ func (client *ModelContainersClient) createOrUpdateHandleResponse(resp *http.Res // Delete - Delete container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -156,7 +156,7 @@ func (client *ModelContainersClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -165,7 +165,7 @@ func (client *ModelContainersClient) deleteCreateRequest(ctx context.Context, re // Get - Get container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -209,7 +209,7 @@ func (client *ModelContainersClient) getCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -226,7 +226,7 @@ func (client *ModelContainersClient) getHandleResponse(resp *http.Response) (Mod // NewListPager - List model containers. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - ModelContainersClientListOptions contains the optional parameters for the ModelContainersClient.NewListPager @@ -279,7 +279,7 @@ func (client *ModelContainersClient) listCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client_example_test.go deleted file mode 100644 index 688be0b4e4ef..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/modelcontainers_client_example_test.go +++ /dev/null @@ -1,175 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelContainer/list.json -func ExampleModelContainersClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewModelContainersClient().NewListPager("testrg123", "workspace123", &armmachinelearning.ModelContainersClientListOptions{Skip: nil, - Count: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.ModelContainerResourceArmPaginatedResult = armmachinelearning.ModelContainerResourceArmPaginatedResult{ - // Value: []*armmachinelearning.ModelContainer{ - // { - // Name: to.Ptr("testContainer"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/models"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/models/testContainer"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ModelContainerProperties{ - // Description: to.Ptr("Model container description"), - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelContainer/delete.json -func ExampleModelContainersClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewModelContainersClient().Delete(ctx, "testrg123", "workspace123", "testContainer", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelContainer/get.json -func ExampleModelContainersClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewModelContainersClient().Get(ctx, "testrg123", "workspace123", "testContainer", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ModelContainer = armmachinelearning.ModelContainer{ - // Name: to.Ptr("testContainer"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/models"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/models/testContainer"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ModelContainerProperties{ - // Description: to.Ptr("Model container description"), - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelContainer/createOrUpdate.json -func ExampleModelContainersClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewModelContainersClient().CreateOrUpdate(ctx, "testrg123", "workspace123", "testContainer", armmachinelearning.ModelContainer{ - Properties: &armmachinelearning.ModelContainerProperties{ - Description: to.Ptr("Model container description"), - Tags: map[string]*string{ - "tag1": to.Ptr("value1"), - "tag2": to.Ptr("value2"), - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ModelContainer = armmachinelearning.ModelContainer{ - // Name: to.Ptr("testContainer"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/models"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg123/providers/Microsoft.MachineLearningServices/workspaces/workspace123/models/testContainer"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // CreatedBy: to.Ptr("John Smith"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-12-01T12:00:00.000Z"); return t}()), - // LastModifiedBy: to.Ptr("John Smith"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ModelContainerProperties{ - // Description: to.Ptr("Model container description"), - // Tags: map[string]*string{ - // "tag1": to.Ptr("value1"), - // "tag2": to.Ptr("value2"), - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/models.go b/sdk/resourcemanager/machinelearning/armmachinelearning/models.go index e3d3c2569066..ed8cb77fd91e 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/models.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/models.go @@ -99,6 +99,68 @@ type AKSSchemaProperties struct { SystemServices []*SystemService } +// APIKeyAuthWorkspaceConnectionProperties - This connection type covers the generic ApiKey auth connection categories, for +// examples: AzureOpenAI: Category:= AzureOpenAI AuthType:= ApiKey (as type discriminator) Credentials:= {ApiKey} as +// Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey Target:= {ApiBase} +// CognitiveService: Category:= CognitiveService AuthType:= ApiKey (as type discriminator) Credentials:= {SubscriptionKey} +// as Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey +// Target:= ServiceRegion={serviceRegion} +// CognitiveSearch: Category:= CognitiveSearch AuthType:= ApiKey (as type discriminator) Credentials:= {Key} as Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey +// Target:= +// {Endpoint} +// Use Metadata property bag for ApiType, ApiVersion, Kind and other metadata fields +type APIKeyAuthWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType + + // Category of the connection + Category *ConnectionCategory + + // Api key object for workspace connection credential. + Credentials *WorkspaceConnectionAPIKey + ExpiryTime *time.Time + + // Anything + Metadata any + Target *string +} + +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type APIKeyAuthWorkspaceConnectionProperties. 
+func (a *APIKeyAuthWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: a.AuthType, + Category: a.Category, + ExpiryTime: a.ExpiryTime, + Metadata: a.Metadata, + Target: a.Target, + } +} + +type AccessKeyAuthTypeWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType + + // Category of the connection + Category *ConnectionCategory + Credentials *WorkspaceConnectionAccessKey + ExpiryTime *time.Time + + // Anything + Metadata any + Target *string +} + +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type AccessKeyAuthTypeWorkspaceConnectionProperties. +func (a *AccessKeyAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: a.AuthType, + Category: a.Category, + ExpiryTime: a.ExpiryTime, + Metadata: a.Metadata, + Target: a.Target, + } +} + // AccountKeyDatastoreCredentials - Account key datastore credentials configuration. type AccountKeyDatastoreCredentials struct { // REQUIRED; [Required] Credential type used to authentication with storage. @@ -131,6 +193,15 @@ func (a *AccountKeyDatastoreSecrets) GetDatastoreSecrets() *DatastoreSecrets { } } +// AcrDetails - Details of ACR account to be used for the Registry +type AcrDetails struct { + // Details of system created ACR account to be used for the Registry + SystemCreatedAcrAccount *SystemCreatedAcrAccount + + // Details of user created ACR account to be used for the Registry + UserCreatedAcrAccount *UserCreatedAcrAccount +} + // AksComputeSecrets - Secrets related to a Machine Learning compute based on AKS. type AksComputeSecrets struct { // REQUIRED; The type of compute @@ -182,6 +253,31 @@ type AksNetworkingConfiguration struct { SubnetID *string } +type AllFeatures struct { + // REQUIRED; [Required] Specifies the feature filter to leverage when selecting features to calculate metrics over. + FilterType *MonitoringFeatureFilterType +} + +// GetMonitoringFeatureFilterBase implements the MonitoringFeatureFilterBaseClassification interface for type AllFeatures. +func (a *AllFeatures) GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase { + return &MonitoringFeatureFilterBase{ + FilterType: a.FilterType, + } +} + +// AllNodes - All nodes means the service will be running on all of the nodes of the job +type AllNodes struct { + // REQUIRED; [Required] Type of the Nodes value + NodesValueType *NodesValueType +} + +// GetNodes implements the NodesClassification interface for type AllNodes. +func (a *AllNodes) GetNodes() *Nodes { + return &Nodes{ + NodesValueType: a.NodesValueType, + } +} + // AmlCompute - An Azure Machine Learning compute. 
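The new connection property types above all plug into the WorkspaceConnectionPropertiesV2Classification polymorphism. A minimal sketch of how a caller might consume them, assuming the fmt and armmachinelearning imports used in the generated examples earlier in this diff; the helper name describeConnection is illustrative only.

func describeConnection(props armmachinelearning.WorkspaceConnectionPropertiesV2Classification) {
	// The shared getter exposes the common fields regardless of the concrete auth type.
	base := props.GetWorkspaceConnectionPropertiesV2()
	fmt.Println("auth type:", *base.AuthType) // AuthType is required on every connection
	if base.Target != nil {
		fmt.Println("target:", *base.Target)
	}
	// A type switch recovers the credentials specific to each auth type.
	switch p := props.(type) {
	case *armmachinelearning.APIKeyAuthWorkspaceConnectionProperties:
		_ = p.Credentials // *WorkspaceConnectionAPIKey
	case *armmachinelearning.AccessKeyAuthTypeWorkspaceConnectionProperties:
		_ = p.Credentials // *WorkspaceConnectionAccessKey
	}
}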
type AmlCompute struct { // REQUIRED; The type of compute @@ -337,36 +433,24 @@ type AmlComputeSchema struct { Properties *AmlComputeProperties } -// AmlOperation - Azure Machine Learning workspace REST API operation +// AmlOperation - Azure Machine Learning team account REST API operation type AmlOperation struct { - // Display name of operation - Display *AmlOperationDisplay + // Gets or sets display name of operation + Display *OperationDisplay // Indicates whether the operation applies to data-plane IsDataAction *bool - // Operation name: {provider}/{resource}/{operation} + // Gets or sets operation name: {provider}/{resource}/{operation} Name *string -} - -// AmlOperationDisplay - Display name of operation -type AmlOperationDisplay struct { - // The description for the operation. - Description *string - - // The operation that users can perform. - Operation *string - - // The resource provider name: Microsoft.MachineLearningExperimentation - Provider *string - // The resource on which the operation is performed. - Resource *string + // The intended executor of the operation: user/system + Origin *string } // AmlOperationListResult - An array of operations supported by the resource provider. type AmlOperationListResult struct { - // List of AML workspace operations supported by the AML workspace resource provider. + // Gets or sets list of AML team account operations supported by the AML team account resource provider. Value []*AmlOperation } @@ -383,6 +467,19 @@ func (a *AmlToken) GetIdentityConfiguration() *IdentityConfiguration { } } +// AmlTokenComputeIdentity - AML token compute identity definition. +type AmlTokenComputeIdentity struct { + // REQUIRED; [Required] Monitor compute identity type enum. + ComputeIdentityType *MonitorComputeIdentityType +} + +// GetMonitorComputeIdentityBase implements the MonitorComputeIdentityBaseClassification interface for type AmlTokenComputeIdentity. +func (a *AmlTokenComputeIdentity) GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase { + return &MonitorComputeIdentityBase{ + ComputeIdentityType: a.ComputeIdentityType, + } +} + // AmlUserFeature - Features enabled for a workspace type AmlUserFeature struct { // Describes the feature for user experience @@ -395,14 +492,26 @@ type AmlUserFeature struct { ID *string } +// ArmResourceID - ARM ResourceId of a resource +type ArmResourceID struct { + // Arm ResourceId is in the format "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}" + // or + // "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}" + ResourceID *string +} + type AssetBase struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // The asset description text. Description *string - // If the name version are system generated (anonymous registration). + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The asset property dictionary. @@ -443,6 +552,15 @@ type AssetJobInput struct { // AssetJobOutput - Asset output type. type AssetJobOutput struct { + // Output Asset Name. 
+ AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + // Output Asset Delivery Mode. Mode *OutputDeliveryMode @@ -477,6 +595,14 @@ type AssignedUser struct { TenantID *string } +type AutoDeleteSetting struct { + // When to check if an asset is expired + Condition *AutoDeleteCondition + + // Expiration condition value. + Value *string +} + // AutoForecastHorizon - Forecast horizon determined automatically by system. type AutoForecastHorizon struct { // REQUIRED; [Required] Set forecast horizon value selection mode. @@ -529,15 +655,24 @@ type AutoMLJob struct { // Is the asset archived? IsArchived *bool + // Notification setting for the job + NotificationSetting *NotificationSetting + // Mapping of output data bindings used in the job. Outputs map[string]JobOutputClassification // The asset property dictionary. Properties map[string]*string + // Queue settings for the job + QueueSettings *QueueSettings + // Compute Resource configuration for the job. Resources *JobResourceConfiguration + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. Services map[string]*JobService @@ -551,18 +686,20 @@ type AutoMLJob struct { // GetJobBaseProperties implements the JobBasePropertiesClassification interface for type AutoMLJob. func (a *AutoMLJob) GetJobBaseProperties() *JobBaseProperties { return &JobBaseProperties{ - ComponentID: a.ComponentID, - ComputeID: a.ComputeID, - DisplayName: a.DisplayName, - ExperimentName: a.ExperimentName, - Identity: a.Identity, - IsArchived: a.IsArchived, - JobType: a.JobType, - Services: a.Services, - Status: a.Status, - Description: a.Description, - Properties: a.Properties, - Tags: a.Tags, + ComponentID: a.ComponentID, + ComputeID: a.ComputeID, + DisplayName: a.DisplayName, + ExperimentName: a.ExperimentName, + Identity: a.Identity, + IsArchived: a.IsArchived, + JobType: a.JobType, + NotificationSetting: a.NotificationSetting, + SecretsConfiguration: a.SecretsConfiguration, + Services: a.Services, + Status: a.Status, + Description: a.Description, + Properties: a.Properties, + Tags: a.Tags, } } @@ -657,6 +794,25 @@ func (a *AutoTargetRollingWindowSize) GetTargetRollingWindowSize() *TargetRollin } } +// AutologgerSettings - Settings for Autologger. +type AutologgerSettings struct { + // REQUIRED; [Required] Indicates whether mlflow autologger is enabled. + MlflowAutologger *MLFlowAutologgerState +} + +type AzMonMonitoringAlertNotificationSettings struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + AlertNotificationType *MonitoringAlertNotificationType +} + +// GetMonitoringAlertNotificationSettingsBase implements the MonitoringAlertNotificationSettingsBaseClassification interface +// for type AzMonMonitoringAlertNotificationSettings. +func (a *AzMonMonitoringAlertNotificationSettings) GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase { + return &MonitoringAlertNotificationSettingsBase{ + AlertNotificationType: a.AlertNotificationType, + } +} + // AzureBlobDatastore - Azure Blob datastore configuration. type AzureBlobDatastore struct { // REQUIRED; [Required] Account credentials. @@ -677,15 +833,24 @@ type AzureBlobDatastore struct { // Azure cloud endpoint for the storage account. Endpoint *string + // Intellectual Property details. 
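The AutoDeleteSetting introduced above is a plain value type that can be attached to asset versions (for example CodeVersionProperties or DataVersionBaseProperties later in this file). A small sketch, assuming the AutoDeleteConditionCreatedGreaterThan enum value and treating the duration literal as illustrative only.

	autoDelete := &armmachinelearning.AutoDeleteSetting{
		// Expire the asset once it is older than the configured value.
		Condition: to.Ptr(armmachinelearning.AutoDeleteConditionCreatedGreaterThan),
		Value:     to.Ptr("P30D"), // illustrative expiration value; confirm the accepted format with the service
	}
	_ = autoDelete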
+ IntellectualProperty *IntellectualProperty + // The asset property dictionary. Properties map[string]*string // Protocol used to communicate with the storage account. Protocol *string + // Azure Resource Group name + ResourceGroup *string + // Indicates which identity to use to authenticate service data access to customer's storage. ServiceDataAccessAuthIdentity *ServiceDataAccessAuthIdentity + // Azure Subscription Id + SubscriptionID *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string @@ -696,12 +861,13 @@ type AzureBlobDatastore struct { // GetDatastoreProperties implements the DatastorePropertiesClassification interface for type AzureBlobDatastore. func (a *AzureBlobDatastore) GetDatastoreProperties() *DatastoreProperties { return &DatastoreProperties{ - Credentials: a.Credentials, - DatastoreType: a.DatastoreType, - IsDefault: a.IsDefault, - Description: a.Description, - Properties: a.Properties, - Tags: a.Tags, + Credentials: a.Credentials, + DatastoreType: a.DatastoreType, + IntellectualProperty: a.IntellectualProperty, + IsDefault: a.IsDefault, + Description: a.Description, + Properties: a.Properties, + Tags: a.Tags, } } @@ -719,12 +885,21 @@ type AzureDataLakeGen1Datastore struct { // The asset description text. Description *string + // Intellectual Property details. + IntellectualProperty *IntellectualProperty + // The asset property dictionary. Properties map[string]*string + // Azure Resource Group name + ResourceGroup *string + // Indicates which identity to use to authenticate service data access to customer's storage. ServiceDataAccessAuthIdentity *ServiceDataAccessAuthIdentity + // Azure Subscription Id + SubscriptionID *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string @@ -735,12 +910,13 @@ type AzureDataLakeGen1Datastore struct { // GetDatastoreProperties implements the DatastorePropertiesClassification interface for type AzureDataLakeGen1Datastore. func (a *AzureDataLakeGen1Datastore) GetDatastoreProperties() *DatastoreProperties { return &DatastoreProperties{ - Credentials: a.Credentials, - DatastoreType: a.DatastoreType, - IsDefault: a.IsDefault, - Description: a.Description, - Properties: a.Properties, - Tags: a.Tags, + Credentials: a.Credentials, + DatastoreType: a.DatastoreType, + IntellectualProperty: a.IntellectualProperty, + IsDefault: a.IsDefault, + Description: a.Description, + Properties: a.Properties, + Tags: a.Tags, } } @@ -764,15 +940,24 @@ type AzureDataLakeGen2Datastore struct { // Azure cloud endpoint for the storage account. Endpoint *string + // Intellectual Property details. + IntellectualProperty *IntellectualProperty + // The asset property dictionary. Properties map[string]*string // Protocol used to communicate with the storage account. Protocol *string + // Azure Resource Group name + ResourceGroup *string + // Indicates which identity to use to authenticate service data access to customer's storage. ServiceDataAccessAuthIdentity *ServiceDataAccessAuthIdentity + // Azure Subscription Id + SubscriptionID *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string @@ -783,12 +968,39 @@ type AzureDataLakeGen2Datastore struct { // GetDatastoreProperties implements the DatastorePropertiesClassification interface for type AzureDataLakeGen2Datastore. 
func (a *AzureDataLakeGen2Datastore) GetDatastoreProperties() *DatastoreProperties { return &DatastoreProperties{ - Credentials: a.Credentials, - DatastoreType: a.DatastoreType, - IsDefault: a.IsDefault, - Description: a.Description, - Properties: a.Properties, - Tags: a.Tags, + Credentials: a.Credentials, + DatastoreType: a.DatastoreType, + IntellectualProperty: a.IntellectualProperty, + IsDefault: a.IsDefault, + Description: a.Description, + Properties: a.Properties, + Tags: a.Tags, + } +} + +// AzureDatastore - Base definition for Azure datastore contents configuration. +type AzureDatastore struct { + // Azure Resource Group name + ResourceGroup *string + + // Azure Subscription Id + SubscriptionID *string +} + +// AzureDevOpsWebhook - Webhook details specific for Azure DevOps +type AzureDevOpsWebhook struct { + // REQUIRED; [Required] Specifies the type of service to send a callback + WebhookType *WebhookType + + // Send callback on a specified notification event + EventType *string +} + +// GetWebhook implements the WebhookClassification interface for type AzureDevOpsWebhook. +func (a *AzureDevOpsWebhook) GetWebhook() *Webhook { + return &Webhook{ + EventType: a.EventType, + WebhookType: a.WebhookType, } } @@ -812,15 +1024,24 @@ type AzureFileDatastore struct { // Azure cloud endpoint for the storage account. Endpoint *string + // Intellectual Property details. + IntellectualProperty *IntellectualProperty + // The asset property dictionary. Properties map[string]*string // Protocol used to communicate with the storage account. Protocol *string + // Azure Resource Group name + ResourceGroup *string + // Indicates which identity to use to authenticate service data access to customer's storage. ServiceDataAccessAuthIdentity *ServiceDataAccessAuthIdentity + // Azure Subscription Id + SubscriptionID *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string @@ -831,12 +1052,45 @@ type AzureFileDatastore struct { // GetDatastoreProperties implements the DatastorePropertiesClassification interface for type AzureFileDatastore. func (a *AzureFileDatastore) GetDatastoreProperties() *DatastoreProperties { return &DatastoreProperties{ - Credentials: a.Credentials, - DatastoreType: a.DatastoreType, - IsDefault: a.IsDefault, - Description: a.Description, - Properties: a.Properties, - Tags: a.Tags, + Credentials: a.Credentials, + DatastoreType: a.DatastoreType, + IntellectualProperty: a.IntellectualProperty, + IsDefault: a.IsDefault, + Description: a.Description, + Properties: a.Properties, + Tags: a.Tags, + } +} + +// AzureMLBatchInferencingServer - Azure ML batch inferencing server configurations. +type AzureMLBatchInferencingServer struct { + // REQUIRED; [Required] Inferencing server type for various targets. + ServerType *InferencingServerType + + // Code configuration for AML batch inferencing server. + CodeConfiguration *CodeConfiguration +} + +// GetInferencingServer implements the InferencingServerClassification interface for type AzureMLBatchInferencingServer. +func (a *AzureMLBatchInferencingServer) GetInferencingServer() *InferencingServer { + return &InferencingServer{ + ServerType: a.ServerType, + } +} + +// AzureMLOnlineInferencingServer - Azure ML online inferencing configurations. +type AzureMLOnlineInferencingServer struct { + // REQUIRED; [Required] Inferencing server type for various targets. + ServerType *InferencingServerType + + // Code configuration for AML inferencing server. 
+ CodeConfiguration *CodeConfiguration +} + +// GetInferencingServer implements the InferencingServerClassification interface for type AzureMLOnlineInferencingServer. +func (a *AzureMLOnlineInferencingServer) GetInferencingServer() *InferencingServer { + return &InferencingServer{ + ServerType: a.ServerType, } } @@ -867,6 +1121,39 @@ func (b *BanditPolicy) GetEarlyTerminationPolicy() *EarlyTerminationPolicy { } } +// BaseEnvironmentID - Base environment type. +type BaseEnvironmentID struct { + // REQUIRED; [Required] Base environment type. + BaseEnvironmentSourceType *BaseEnvironmentSourceType + + // REQUIRED; [Required] Resource id accepting ArmId or AzureMlId. + ResourceID *string +} + +// GetBaseEnvironmentSource implements the BaseEnvironmentSourceClassification interface for type BaseEnvironmentID. +func (b *BaseEnvironmentID) GetBaseEnvironmentSource() *BaseEnvironmentSource { + return &BaseEnvironmentSource{ + BaseEnvironmentSourceType: b.BaseEnvironmentSourceType, + } +} + +// BaseEnvironmentSourceClassification provides polymorphic access to related types. +// Call the interface's GetBaseEnvironmentSource() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *BaseEnvironmentID, *BaseEnvironmentSource +type BaseEnvironmentSourceClassification interface { + // GetBaseEnvironmentSource returns the BaseEnvironmentSource content of the underlying type. + GetBaseEnvironmentSource() *BaseEnvironmentSource +} + +type BaseEnvironmentSource struct { + // REQUIRED; [Required] Base environment type. + BaseEnvironmentSourceType *BaseEnvironmentSourceType +} + +// GetBaseEnvironmentSource implements the BaseEnvironmentSourceClassification interface for type BaseEnvironmentSource. +func (b *BaseEnvironmentSource) GetBaseEnvironmentSource() *BaseEnvironmentSource { return b } + type BatchDeployment struct { // REQUIRED; The geo-location where the resource lives Location *string @@ -899,6 +1186,26 @@ type BatchDeployment struct { Type *string } +// BatchDeploymentConfigurationClassification provides polymorphic access to related types. +// Call the interface's GetBatchDeploymentConfiguration() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *BatchDeploymentConfiguration, *BatchPipelineComponentDeploymentConfiguration +type BatchDeploymentConfigurationClassification interface { + // GetBatchDeploymentConfiguration returns the BatchDeploymentConfiguration content of the underlying type. + GetBatchDeploymentConfiguration() *BatchDeploymentConfiguration +} + +// BatchDeploymentConfiguration - Properties relevant to different deployment types. +type BatchDeploymentConfiguration struct { + // REQUIRED; [Required] The type of the deployment + DeploymentConfigurationType *BatchDeploymentConfigurationType +} + +// GetBatchDeploymentConfiguration implements the BatchDeploymentConfigurationClassification interface for type BatchDeploymentConfiguration. +func (b *BatchDeploymentConfiguration) GetBatchDeploymentConfiguration() *BatchDeploymentConfiguration { + return b +} + // BatchDeploymentProperties - Batch inference settings per deployment. type BatchDeploymentProperties struct { // Code configuration for the endpoint deployment. @@ -907,10 +1214,13 @@ type BatchDeploymentProperties struct { // Compute target for batch inference operation. Compute *string + // Properties relevant to different deployment types. 
+ DeploymentConfiguration BatchDeploymentConfigurationClassification + // Description of the endpoint deployment. Description *string - // ARM resource ID or AssetId of the environment specification for the endpoint deployment. + // ARM resource ID of the environment specification for the endpoint deployment. EnvironmentID *string // Environment variables configuration for the deployment. @@ -1042,7 +1352,7 @@ type BatchEndpointProperties struct { // 'Key' doesn't expire but 'AMLToken' does. AuthMode *EndpointAuthMode - // Default values for Batch Endpoint + // Default values for Batch Endpoint. Defaults *BatchEndpointDefaults // Description of the inference endpoint. @@ -1111,6 +1421,31 @@ type BatchEndpointsClientListOptions struct { Skip *string } +// BatchPipelineComponentDeploymentConfiguration - Properties for a Batch Pipeline Component Deployment. +type BatchPipelineComponentDeploymentConfiguration struct { + // REQUIRED; [Required] The type of the deployment + DeploymentConfigurationType *BatchDeploymentConfigurationType + + // The ARM id of the component to be run. + ComponentID *IDAssetReference + + // The description which will be applied to the job. + Description *string + + // Run-time settings for the pipeline job. + Settings map[string]*string + + // The tags which will be applied to the job. + Tags map[string]*string +} + +// GetBatchDeploymentConfiguration implements the BatchDeploymentConfigurationClassification interface for type BatchPipelineComponentDeploymentConfiguration. +func (b *BatchPipelineComponentDeploymentConfiguration) GetBatchDeploymentConfiguration() *BatchDeploymentConfiguration { + return &BatchDeploymentConfiguration{ + DeploymentConfigurationType: b.DeploymentConfigurationType, + } +} + // BatchRetrySettings - Retry settings for a batch inference operation. type BatchRetrySettings struct { // Maximum retry count for a mini-batch @@ -1133,6 +1468,28 @@ func (b *BayesianSamplingAlgorithm) GetSamplingAlgorithm() *SamplingAlgorithm { } } +type BindOptions struct { + // Indicate whether to create host path. + CreateHostPath *bool + + // Type of Bind Option + Propagation *string + + // Mention the selinux options. + Selinux *string +} + +type BlobReferenceForConsumptionDto struct { + // Blob URI path for client to upload data. Example: https://blob.windows.core.net/Container/Path + BlobURI *string + + // Credential info to access storage account + Credential PendingUploadCredentialDtoClassification + + // Arm ID of the storage account to use + StorageAccountArmID *string +} + // BuildContext - Configuration settings for Docker build context type BuildContext struct { // REQUIRED; [Required] URI of the Docker build context used to build the image. Supports blob URIs on environment creation @@ -1143,6 +1500,98 @@ type BuildContext struct { DockerfilePath *string } +type CSVExportSummary struct { + // REQUIRED; [Required] The format of exported labels, also as the discriminator. + Format *ExportFormatType + + // READ-ONLY; The container name to which the labels will be exported. + ContainerName *string + + // READ-ONLY; The time when the export was completed. + EndDateTime *time.Time + + // READ-ONLY; The total number of labeled datapoints exported. + ExportedRowCount *int64 + + // READ-ONLY; Name and identifier of the job containing exported labels. + LabelingJobID *string + + // READ-ONLY; The output path where the labels will be exported. + SnapshotPath *string + + // READ-ONLY; The time when the export was requested. 
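A sketch of wiring the new pipeline-component configuration into BatchDeploymentProperties via the DeploymentConfiguration field shown above; the IDAssetReference fields and the settings key are assumptions, not taken from this diff.

	deployment := armmachinelearning.BatchDeploymentProperties{
		DeploymentConfiguration: &armmachinelearning.BatchPipelineComponentDeploymentConfiguration{
			DeploymentConfigurationType: to.Ptr(armmachinelearning.BatchDeploymentConfigurationTypePipelineComponent),
			// ComponentID points at the registered pipeline component to run.
			ComponentID: &armmachinelearning.IDAssetReference{
				ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID),
				AssetID:       to.Ptr("<component asset ARM id>"),
			},
			Settings: map[string]*string{
				"continue_on_step_failure": to.Ptr("true"), // hypothetical run-time setting
			},
		},
	}
	_ = deployment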
+ StartDateTime *time.Time +} + +// GetExportSummary implements the ExportSummaryClassification interface for type CSVExportSummary. +func (c *CSVExportSummary) GetExportSummary() *ExportSummary { + return &ExportSummary{ + EndDateTime: c.EndDateTime, + ExportedRowCount: c.ExportedRowCount, + Format: c.Format, + LabelingJobID: c.LabelingJobID, + StartDateTime: c.StartDateTime, + } +} + +type CategoricalDataDriftMetricThreshold struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType + + // REQUIRED; [Required] The categorical data drift metric to calculate. + Metric *CategoricalDataDriftMetric + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetDataDriftMetricThresholdBase implements the DataDriftMetricThresholdBaseClassification interface for type CategoricalDataDriftMetricThreshold. +func (c *CategoricalDataDriftMetricThreshold) GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase { + return &DataDriftMetricThresholdBase{ + DataType: c.DataType, + Threshold: c.Threshold, + } +} + +type CategoricalDataQualityMetricThreshold struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType + + // REQUIRED; [Required] The categorical data quality metric to calculate. + Metric *CategoricalDataQualityMetric + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetDataQualityMetricThresholdBase implements the DataQualityMetricThresholdBaseClassification interface for type CategoricalDataQualityMetricThreshold. +func (c *CategoricalDataQualityMetricThreshold) GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase { + return &DataQualityMetricThresholdBase{ + DataType: c.DataType, + Threshold: c.Threshold, + } +} + +type CategoricalPredictionDriftMetricThreshold struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType + + // REQUIRED; [Required] The categorical prediction drift metric to calculate. + Metric *CategoricalPredictionDriftMetric + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetPredictionDriftMetricThresholdBase implements the PredictionDriftMetricThresholdBaseClassification interface for type +// CategoricalPredictionDriftMetricThreshold. +func (c *CategoricalPredictionDriftMetricThreshold) GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase { + return &PredictionDriftMetricThresholdBase{ + DataType: c.DataType, + Threshold: c.Threshold, + } +} + // CertificateDatastoreCredentials - Certificate datastore credentials configuration. type CertificateDatastoreCredentials struct { // REQUIRED; [Required] Service principal client ID. @@ -1204,6 +1653,9 @@ type Classification struct { // Featurization inputs needed for AutoML job. FeaturizationSettings *TableVerticalFeaturizationSettings + // Model/training parameters that will remain constant throughout training. + FixedParameters *TableFixedParameters + // Execution constraints for AutoMLJob. LimitSettings *TableVerticalLimitSettings @@ -1219,6 +1671,12 @@ type Classification struct { // Primary metric for the task. 
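The categorical metric threshold types above all share the same shape. A minimal sketch, assuming the MonitoringFeatureDataTypeCategorical enum value and that MonitoringThreshold carries a numeric Value field.

	driftThreshold := &armmachinelearning.CategoricalDataDriftMetricThreshold{
		DataType: to.Ptr(armmachinelearning.MonitoringFeatureDataTypeCategorical),
		Metric:   to.Ptr(armmachinelearning.CategoricalDataDriftMetricJensenShannonDistance),
		// Leaving Threshold nil falls back to the service default for the chosen metric.
		Threshold: &armmachinelearning.MonitoringThreshold{Value: to.Ptr(0.1)},
	}
	_ = driftThreshold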
PrimaryMetric *ClassificationPrimaryMetrics + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*TableParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *TableSweepSettings + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. TargetColumnName *string @@ -1254,6 +1712,26 @@ func (c *Classification) GetAutoMLVertical() *AutoMLVertical { } } +type ClassificationModelPerformanceMetricThreshold struct { + // REQUIRED; [Required] The classification model performance to calculate. + Metric *ClassificationModelPerformanceMetric + + // REQUIRED; [Required] Specifies the data type of the metric threshold. + ModelType *MonitoringModelType + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetModelPerformanceMetricThresholdBase implements the ModelPerformanceMetricThresholdBaseClassification interface for type +// ClassificationModelPerformanceMetricThreshold. +func (c *ClassificationModelPerformanceMetricThreshold) GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase { + return &ModelPerformanceMetricThresholdBase{ + ModelType: c.ModelType, + Threshold: c.Threshold, + } +} + // ClassificationTrainingSettings - Classification Training related configuration. type ClassificationTrainingSettings struct { // Allowed models for classification task. @@ -1284,6 +1762,12 @@ type ClassificationTrainingSettings struct { // Stack ensemble settings for stack ensemble run. StackEnsembleSettings *StackEnsembleSettings + + // TrainingMode mode - Setting to 'auto' is same as setting it to 'non-distributed' for now, however in the future may result + // in mixed mode or heuristics based mode selection. Default is 'auto'. If + // 'Distributed' then only distributed featurization is used and distributed algorithms are chosen. If 'NonDistributed' then + // only non distributed algorithms are chosen. + TrainingMode *TrainingMode } // ClusterUpdateParameters - AmlCompute update parameters. @@ -1298,6 +1782,40 @@ type ClusterUpdateProperties struct { Properties *ScaleSettingsInformation } +type CocoExportSummary struct { + // REQUIRED; [Required] The format of exported labels, also as the discriminator. + Format *ExportFormatType + + // READ-ONLY; The container name to which the labels will be exported. + ContainerName *string + + // READ-ONLY; The time when the export was completed. + EndDateTime *time.Time + + // READ-ONLY; The total number of labeled datapoints exported. + ExportedRowCount *int64 + + // READ-ONLY; Name and identifier of the job containing exported labels. + LabelingJobID *string + + // READ-ONLY; The output path where the labels will be exported. + SnapshotPath *string + + // READ-ONLY; The time when the export was requested. + StartDateTime *time.Time +} + +// GetExportSummary implements the ExportSummaryClassification interface for type CocoExportSummary. +func (c *CocoExportSummary) GetExportSummary() *ExportSummary { + return &ExportSummary{ + EndDateTime: c.EndDateTime, + ExportedRowCount: c.ExportedRowCount, + Format: c.Format, + LabelingJobID: c.LabelingJobID, + StartDateTime: c.StartDateTime, + } +} + // CodeConfiguration - Configuration for a scoring code asset. type CodeConfiguration struct { // REQUIRED; [Required] The script to execute on startup. eg. 
"score.py" @@ -1344,6 +1862,9 @@ type CodeContainerProperties struct { // READ-ONLY; The next auto incremental version NextVersion *string + + // READ-ONLY; Provisioning state for the code container. + ProvisioningState *AssetProvisioningState } // CodeContainerResourceArmPaginatedResult - A paginated list of CodeContainer entities. @@ -1397,16 +1918,20 @@ type CodeVersion struct { // CodeVersionProperties - Code asset version details. type CodeVersionProperties struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // Uri where code is located CodeURI *string // The asset description text. Description *string - // If the name version are system generated (anonymous registration). + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The asset property dictionary. @@ -1414,6 +1939,9 @@ type CodeVersionProperties struct { // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string + + // READ-ONLY; Provisioning state for the code version. + ProvisioningState *AssetProvisioningState } // CodeVersionResourceArmPaginatedResult - A paginated list of CodeVersion entities. @@ -1425,6 +1953,12 @@ type CodeVersionResourceArmPaginatedResult struct { Value []*CodeVersion } +// CodeVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the CodeVersionsClient.CreateOrGetStartPendingUpload +// method. +type CodeVersionsClientCreateOrGetStartPendingUploadOptions struct { + // placeholder for future optional parameters +} + // CodeVersionsClientCreateOrUpdateOptions contains the optional parameters for the CodeVersionsClient.CreateOrUpdate method. type CodeVersionsClientCreateOrUpdateOptions struct { // placeholder for future optional parameters @@ -1442,6 +1976,10 @@ type CodeVersionsClientGetOptions struct { // CodeVersionsClientListOptions contains the optional parameters for the CodeVersionsClient.NewListPager method. type CodeVersionsClientListOptions struct { + // If specified, return CodeVersion assets with specified content hash value, regardless of name + Hash *string + // Hash algorithm version when listing by hash + HashVersion *string // Ordering of list. OrderBy *string // Continuation token for pagination. @@ -1450,6 +1988,22 @@ type CodeVersionsClientListOptions struct { Top *int32 } +type Collection struct { + // The msi client id used to collect logging to blob storage. If it's null,backend will pick a registered endpoint identity + // to auth. + ClientID *string + + // Enable or disable data collection. + DataCollectionMode *DataCollectionMode + + // The data asset arm resource id. Client side will ensure data asset is pointing to the blob storage, and backend will collect + // data to the blob storage. + DataID *string + + // The sampling rate for collection. Sampling rate 1.0 means we collect 100% of data by default. + SamplingRate *float64 +} + // ColumnTransformer - Column transformer parameters. type ColumnTransformer struct { // Fields to apply transformer logic on. @@ -1470,6 +2024,9 @@ type CommandJob struct { // REQUIRED; [Required] Specifies the type of job. JobType *JobType + // Distribution configuration of the job. 
If set, this should be one of Mpi, Tensorflow, PyTorch, or null. + AutologgerSettings *AutologgerSettings + // ARM resource ID of the code asset. CodeID *string @@ -1485,7 +2042,7 @@ type CommandJob struct { // Display name of job. DisplayName *string - // Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null. + // Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, Ray, or null. Distribution DistributionConfigurationClassification // Environment variables included in the job. @@ -1507,15 +2064,24 @@ type CommandJob struct { // Command Job limit. Limits *CommandJobLimits + // Notification setting for the job + NotificationSetting *NotificationSetting + // Mapping of output data bindings used in the job. Outputs map[string]JobOutputClassification // The asset property dictionary. Properties map[string]*string + // Queue settings for the job + QueueSettings *QueueSettings + // Compute Resource configuration for the job. Resources *JobResourceConfiguration + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. Services map[string]*JobService @@ -1532,18 +2098,20 @@ type CommandJob struct { // GetJobBaseProperties implements the JobBasePropertiesClassification interface for type CommandJob. func (c *CommandJob) GetJobBaseProperties() *JobBaseProperties { return &JobBaseProperties{ - ComponentID: c.ComponentID, - ComputeID: c.ComputeID, - DisplayName: c.DisplayName, - ExperimentName: c.ExperimentName, - Identity: c.Identity, - IsArchived: c.IsArchived, - JobType: c.JobType, - Services: c.Services, - Status: c.Status, - Description: c.Description, - Properties: c.Properties, - Tags: c.Tags, + ComponentID: c.ComponentID, + ComputeID: c.ComputeID, + DisplayName: c.DisplayName, + ExperimentName: c.ExperimentName, + Identity: c.Identity, + IsArchived: c.IsArchived, + JobType: c.JobType, + NotificationSetting: c.NotificationSetting, + SecretsConfiguration: c.SecretsConfiguration, + Services: c.Services, + Status: c.Status, + Description: c.Description, + Properties: c.Properties, + Tags: c.Tags, } } @@ -1602,6 +2170,9 @@ type ComponentContainerProperties struct { // READ-ONLY; The next auto incremental version NextVersion *string + + // READ-ONLY; Provisioning state for the component container. + ProvisioningState *AssetProvisioningState } // ComponentContainerResourceArmPaginatedResult - A paginated list of ComponentContainer entities. @@ -1657,23 +2228,33 @@ type ComponentVersion struct { // ComponentVersionProperties - Definition of a component version: defines resources that span component types. type ComponentVersionProperties struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // Defines Component definition details. ComponentSpec any // The asset description text. Description *string - // If the name version are system generated (anonymous registration). + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The asset property dictionary. 
Properties map[string]*string + // Stage in the component lifecycle + Stage *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string + + // READ-ONLY; Provisioning state for the component version. + ProvisioningState *AssetProvisioningState } // ComponentVersionResourceArmPaginatedResult - A paginated list of ComponentVersion entities. @@ -1709,6 +2290,8 @@ type ComponentVersionsClientListOptions struct { OrderBy *string // Continuation token for pagination. Skip *string + // Component stage. + Stage *string // Maximum number of records to return. Top *int32 } @@ -1817,6 +2400,17 @@ type ComputeClientListOptions struct { Skip *string } +// ComputeClientUpdateCustomServicesOptions contains the optional parameters for the ComputeClient.UpdateCustomServices method. +type ComputeClientUpdateCustomServicesOptions struct { + // placeholder for future optional parameters +} + +// ComputeClientUpdateIdleShutdownSettingOptions contains the optional parameters for the ComputeClient.UpdateIdleShutdownSetting +// method. +type ComputeClientUpdateIdleShutdownSettingOptions struct { + // placeholder for future optional parameters +} + // ComputeInstance - An Azure Machine Learning compute instance. type ComputeInstance struct { // REQUIRED; The type of compute @@ -1879,6 +2473,12 @@ type ComputeInstanceApplication struct { EndpointURI *string } +// ComputeInstanceAutologgerSettings - Specifies settings for autologger. +type ComputeInstanceAutologgerSettings struct { + // Indicates whether mlflow autologger is enabled for notebooks. + MlflowAutologger *MlflowAutologger +} + // ComputeInstanceConnectivityEndpoints - Defines all connectivity endpoints and properties for an ComputeInstance. type ComputeInstanceConnectivityEndpoints struct { // READ-ONLY; Private IP Address of this ComputeInstance (local to the VNET in which the compute instance is deployed). @@ -1997,20 +2597,33 @@ type ComputeInstanceProperties struct { // user can access applications on this instance depending on his/her assigned role. ApplicationSharingPolicy *ApplicationSharingPolicy + // Specifies settings for autologger. + AutologgerSettings *ComputeInstanceAutologgerSettings + // The Compute Instance Authorization type. Available values are personal (default). ComputeInstanceAuthorizationType *ComputeInstanceAuthorizationType + // List of Custom Services added to the compute. + CustomServices []*CustomService + // Enable or disable node public IP address provisioning. Possible values are: Possible values are: true - Indicates that // the compute nodes will have public IPs provisioned. false - Indicates that the // compute nodes will have a private endpoint and no public IPs. EnableNodePublicIP *bool + // Stops compute instance after user defined period of inactivity. Time is defined in ISO8601 format. Minimum is 15 min, maximum + // is 3 days. + IdleTimeBeforeShutdown *string + // Settings for a personal compute instance. PersonalComputeInstanceSettings *PersonalComputeInstanceSettings // Specifies policy and settings for SSH access. SSHSettings *ComputeInstanceSSHSettings + // The list of schedules to be applied on the computes. + Schedules *ComputeSchedules + // Details of customized scripts to execute for setting up the cluster. SetupScripts *SetupScripts @@ -2044,8 +2657,8 @@ type ComputeInstanceProperties struct { // READ-ONLY; The last operation on ComputeInstance. LastOperation *ComputeInstanceLastOperation - // READ-ONLY; The list of schedules to be applied on the computes. 
- Schedules *ComputeSchedules + // READ-ONLY; Returns metadata about the operating system image for this compute instance. + OSImageMetadata *ImageMetadata // READ-ONLY; The current state of this ComputeInstance. State *ComputeInstanceState @@ -2118,6 +2731,10 @@ type ComputeResourceSchema struct { Properties ComputeClassification } +type ComputeRuntimeDto struct { + SparkRuntimeVersion *string +} + // ComputeSchedules - The list of schedules to be applied on the computes type ComputeSchedules struct { // The list of compute start stop schedules to be applied. @@ -2148,10 +2765,10 @@ type ComputeStartStopSchedule struct { Action *ComputePowerAction // Required if triggerType is Cron. - Cron *CronTrigger + Cron *Cron // Required if triggerType is Recurrence. - Recurrence *RecurrenceTrigger + Recurrence *Recurrence // [Deprecated] Not used any more. Schedule *ScheduleBase @@ -2190,10 +2807,37 @@ type ContainerResourceSettings struct { } type CosmosDbSettings struct { - // The throughput of the collections in cosmosdb database CollectionsThroughput *int32 } +type CreateMonitorAction struct { + // REQUIRED; [Required] Specifies the action type of the schedule + ActionType *ScheduleActionType + + // REQUIRED; [Required] Defines the monitor. + MonitorDefinition *MonitorDefinition +} + +// GetScheduleActionBase implements the ScheduleActionBaseClassification interface for type CreateMonitorAction. +func (c *CreateMonitorAction) GetScheduleActionBase() *ScheduleActionBase { + return &ScheduleActionBase{ + ActionType: c.ActionType, + } +} + +// Cron - The workflow trigger cron for ComputeStartStop schedule type. +type Cron struct { + // [Required] Specifies cron expression of schedule. The expression should follow NCronTab format. + Expression *string + + // The start time in yyyy-MM-ddTHH:mm:ss format. + StartTime *string + + // Specifies time zone in which the schedule runs. TimeZone should follow Windows time zone format. Refer: + // https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11 + TimeZone *string +} + type CronTrigger struct { // REQUIRED; [Required] Specifies cron expression of schedule. The expression should follow NCronTab format. Expression *string @@ -2240,6 +2884,66 @@ func (c *CustomForecastHorizon) GetForecastHorizon() *ForecastHorizon { } } +// CustomInferencingServer - Custom inference server configurations. +type CustomInferencingServer struct { + // REQUIRED; [Required] Inferencing server type for various targets. + ServerType *InferencingServerType + + // Inference configuration for custom inferencing. + InferenceConfiguration *OnlineInferenceConfiguration +} + +// GetInferencingServer implements the InferencingServerClassification interface for type CustomInferencingServer. 
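ComputeStartStopSchedule now takes the lighter-weight Cron and Recurrence types in place of CronTrigger and RecurrenceTrigger. A sketch of a stop-at-night schedule; the ComputePowerActionStop value and the cron expression are assumptions, and the schedule's trigger type field (not shown in this hunk) still has to be set to its cron value.

	stopSchedule := &armmachinelearning.ComputeStartStopSchedule{
		Action: to.Ptr(armmachinelearning.ComputePowerActionStop),
		Cron: &armmachinelearning.Cron{
			Expression: to.Ptr("0 22 * * 1-5"),        // 22:00 on weekdays, NCronTab format
			StartTime:  to.Ptr("2023-06-15T00:00:00"), // yyyy-MM-ddTHH:mm:ss per the field docs
			TimeZone:   to.Ptr("UTC"),
		},
	}
	_ = stopSchedule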
+func (c *CustomInferencingServer) GetInferencingServer() *InferencingServer { + return &InferencingServer{ + ServerType: c.ServerType, + } +} + +// CustomKeys - Custom Keys credential object +type CustomKeys struct { + // Dictionary of + Keys map[string]*string +} + +// CustomKeysWorkspaceConnectionProperties - Category:= CustomKeys AuthType:= CustomKeys (as type discriminator) Credentials:= +// {CustomKeys} as Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.CustomKeys Target:= {any value} Use +// Metadata property bag for ApiVersion and other metadata fields +type CustomKeysWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType + + // Category of the connection + Category *ConnectionCategory + + // Custom Keys credential object + Credentials *CustomKeys + ExpiryTime *time.Time + + // Anything + Metadata any + Target *string +} + +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type CustomKeysWorkspaceConnectionProperties. +func (c *CustomKeysWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: c.AuthType, + Category: c.Category, + ExpiryTime: c.ExpiryTime, + Metadata: c.Metadata, + Target: c.Target, + } +} + +type CustomMetricThreshold struct { + // REQUIRED; [Required] The user-defined metric to calculate. + Metric *string + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + type CustomModelJobInput struct { // REQUIRED; [Required] Specifies the type of job. JobInputType *JobInputType @@ -2266,6 +2970,15 @@ type CustomModelJobOutput struct { // REQUIRED; [Required] Specifies the type of job. JobOutputType *JobOutputType + // Output Asset Name. + AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + // Description for the output. Description *string @@ -2284,6 +2997,41 @@ func (c *CustomModelJobOutput) GetJobOutput() *JobOutput { } } +type CustomMonitoringSignal struct { + // REQUIRED; [Required] ARM resource ID of the component resource used to calculate the custom metrics. + ComponentID *string + + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + MetricThresholds []*CustomMetricThreshold + + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType + + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + WorkspaceConnection *MonitoringWorkspaceConnection + + // Monitoring assets to take as input. Key is the component input port name, value is the data asset. + InputAssets map[string]MonitoringInputDataBaseClassification + + // Extra component parameters to take as input. Key is the component literal input port name, value is the parameter value. + Inputs map[string]JobInputClassification + + // The current notification mode for this signal. + Mode *MonitoringNotificationMode + + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string +} + +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type CustomMonitoringSignal. 
+func (c *CustomMonitoringSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: c.Mode, + Properties: c.Properties, + SignalType: c.SignalType, + } +} + // CustomNCrossValidations - N-Cross validations are specified by user. type CustomNCrossValidations struct { // REQUIRED; [Required] Mode for determining N-Cross validations. @@ -2315,6 +3063,30 @@ func (c *CustomSeasonality) GetSeasonality() *Seasonality { } } +// CustomService - Specifies the custom service configuration +type CustomService struct { + // OPTIONAL; Contains additional key/value pairs not defined in the schema. + AdditionalProperties map[string]any + + // Describes the docker settings for the image + Docker *Docker + + // Configuring the endpoints for the container + Endpoints []*Endpoint + + // Environment Variable for the container + EnvironmentVariables map[string]*EnvironmentVariable + + // Describes the Image Specifications + Image *Image + + // Name of the Custom Service + Name *string + + // Configuring the volumes for the container + Volumes []*VolumeDefinition +} + type CustomTargetLags struct { // REQUIRED; [Required] Set target lags mode - Auto/Custom Mode *TargetLagsMode @@ -2345,6 +3117,24 @@ func (c *CustomTargetRollingWindowSize) GetTargetRollingWindowSize() *TargetRoll } } +type DataCollector struct { + // REQUIRED; [Required] The collection configuration. Each collection has it own configuration to collect model data and the + // name of collection can be arbitrary string. Model data collector can be used for either + // payload logging or custom logging or both of them. Collection request and response are reserved for payload logging, others + // are for custom logging. + Collections map[string]*Collection + + // The request logging configuration for mdc, it includes advanced logging settings for all collections. It's optional. + RequestLogging *RequestLogging + + // When model data is collected to blob storage, we need to roll the data to different path to avoid logging all of them in + // a single blob file. If the rolling rate is hour, all data will be collected in + // the blob path /yyyy/MM/dd/HH/. If it's day, all data will be collected in blob path /yyyy/MM/dd/. The other benefit of + // rolling path is that model monitoring ui is able to select a time range of data + // very quickly. + RollingRate *RollingRateType +} + // DataContainer - Azure Resource Manager resource envelope. type DataContainer struct { // REQUIRED; [Required] Additional attributes of the entity. @@ -2420,6 +3210,66 @@ type DataContainersClientListOptions struct { Skip *string } +// DataDriftMetricThresholdBaseClassification provides polymorphic access to related types. +// Call the interface's GetDataDriftMetricThresholdBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CategoricalDataDriftMetricThreshold, *DataDriftMetricThresholdBase, *NumericalDataDriftMetricThreshold +type DataDriftMetricThresholdBaseClassification interface { + // GetDataDriftMetricThresholdBase returns the DataDriftMetricThresholdBase content of the underlying type. + GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase +} + +type DataDriftMetricThresholdBase struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType + + // The threshold value. If null, a default value will be set depending on the selected metric. 
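A sketch of the new model data collection settings defined above; the DataCollectionModeEnabled and RollingRateTypeHour enum values are assumptions inferred from the field descriptions, not taken from this diff.

	collector := &armmachinelearning.DataCollector{
		Collections: map[string]*armmachinelearning.Collection{
			// "request" and "response" are reserved for payload logging per the Collections docs.
			"request": {
				DataCollectionMode: to.Ptr(armmachinelearning.DataCollectionModeEnabled),
				SamplingRate:       to.Ptr(1.0), // collect 100% of traffic
			},
		},
		RollingRate: to.Ptr(armmachinelearning.RollingRateTypeHour),
	}
	_ = collector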
+ Threshold *MonitoringThreshold +} + +// GetDataDriftMetricThresholdBase implements the DataDriftMetricThresholdBaseClassification interface for type DataDriftMetricThresholdBase. +func (d *DataDriftMetricThresholdBase) GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase { + return d +} + +type DataDriftMonitoringSignal struct { + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + MetricThresholds []DataDriftMetricThresholdBaseClassification + + // REQUIRED; [Required] The data which drift will be calculated for. + ProductionData MonitoringInputDataBaseClassification + + // REQUIRED; [Required] The data to calculate drift against. + ReferenceData MonitoringInputDataBaseClassification + + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType + + // The data segment used for scoping on a subset of the data population. + DataSegment *MonitoringDataSegment + + // A dictionary that maps feature names to their respective data types. + FeatureDataTypeOverride map[string]*MonitoringFeatureDataType + + // The feature filter which identifies which feature to calculate drift over. + Features MonitoringFeatureFilterBaseClassification + + // The current notification mode for this signal. + Mode *MonitoringNotificationMode + + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string +} + +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type DataDriftMonitoringSignal. +func (d *DataDriftMonitoringSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: d.Mode, + Properties: d.Properties, + SignalType: d.SignalType, + } +} + // DataFactory - A DataFactory compute. type DataFactory struct { // REQUIRED; The type of compute @@ -2470,6 +3320,81 @@ func (d *DataFactory) GetCompute() *Compute { } } +type DataImport struct { + // REQUIRED; [Required] Specifies the type of data. + DataType *DataType + + // REQUIRED; [Required] Uri of the data. Example: https://go.microsoft.com/fwlink/?linkid=2202330 + DataURI *string + + // Name of the asset for data import job to create + AssetName *string + + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + + // The asset description text. + Description *string + + // Intellectual Property details. Used if data is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous + IsAnonymous *bool + + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived + IsArchived *bool + + // The asset property dictionary. + Properties map[string]*string + + // Source data of the asset to import from + Source DataImportSourceClassification + + // Stage in the data lifecycle assigned to this data asset + Stage *string + + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string +} + +// GetDataVersionBaseProperties implements the DataVersionBasePropertiesClassification interface for type DataImport. 
+func (d *DataImport) GetDataVersionBaseProperties() *DataVersionBaseProperties { + return &DataVersionBaseProperties{ + DataType: d.DataType, + DataURI: d.DataURI, + IntellectualProperty: d.IntellectualProperty, + Stage: d.Stage, + AutoDeleteSetting: d.AutoDeleteSetting, + IsAnonymous: d.IsAnonymous, + IsArchived: d.IsArchived, + Description: d.Description, + Properties: d.Properties, + Tags: d.Tags, + } +} + +// DataImportSourceClassification provides polymorphic access to related types. +// Call the interface's GetDataImportSource() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *DataImportSource, *DatabaseSource, *FileSystemSource +type DataImportSourceClassification interface { + // GetDataImportSource returns the DataImportSource content of the underlying type. + GetDataImportSource() *DataImportSource +} + +type DataImportSource struct { + // REQUIRED; [Required] Specifies the type of data. + SourceType *DataImportSourceType + + // Workspace connection for data import source storage + Connection *string +} + +// GetDataImportSource implements the DataImportSourceClassification interface for type DataImportSource. +func (d *DataImportSource) GetDataImportSource() *DataImportSource { return d } + // DataLakeAnalytics - A DataLakeAnalytics compute. type DataLakeAnalytics struct { // REQUIRED; The type of compute @@ -2549,6 +3474,63 @@ func (d *DataPathAssetReference) GetAssetReferenceBase() *AssetReferenceBase { } } +// DataQualityMetricThresholdBaseClassification provides polymorphic access to related types. +// Call the interface's GetDataQualityMetricThresholdBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CategoricalDataQualityMetricThreshold, *DataQualityMetricThresholdBase, *NumericalDataQualityMetricThreshold +type DataQualityMetricThresholdBaseClassification interface { + // GetDataQualityMetricThresholdBase returns the DataQualityMetricThresholdBase content of the underlying type. + GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase +} + +type DataQualityMetricThresholdBase struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetDataQualityMetricThresholdBase implements the DataQualityMetricThresholdBaseClassification interface for type DataQualityMetricThresholdBase. +func (d *DataQualityMetricThresholdBase) GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase { + return d +} + +type DataQualityMonitoringSignal struct { + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + MetricThresholds []DataQualityMetricThresholdBaseClassification + + // REQUIRED; [Required] The data produced by the production service which drift will be calculated for. + ProductionData MonitoringInputDataBaseClassification + + // REQUIRED; [Required] The data to calculate drift against. + ReferenceData MonitoringInputDataBaseClassification + + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType + + // A dictionary that maps feature names to their respective data types. + FeatureDataTypeOverride map[string]*MonitoringFeatureDataType + + // The features to calculate drift over. 
+ Features MonitoringFeatureFilterBaseClassification + + // The current notification mode for this signal. + Mode *MonitoringNotificationMode + + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string +} + +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type DataQualityMonitoringSignal. +func (d *DataQualityMonitoringSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: d.Mode, + Properties: d.Properties, + SignalType: d.SignalType, + } +} + // DataVersionBase - Azure Resource Manager resource envelope. type DataVersionBase struct { // REQUIRED; [Required] Additional attributes of the entity. @@ -2581,21 +3563,31 @@ type DataVersionBaseProperties struct { // REQUIRED; [Required] Specifies the type of data. DataType *DataType - // REQUIRED; [Required] Uri of the data. Usage/meaning depends on Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20221001.Assets.DataVersionBase.DataType + // REQUIRED; [Required] Uri of the data. Example: https://go.microsoft.com/fwlink/?linkid=2202330 DataURI *string + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // The asset description text. Description *string - // If the name version are system generated (anonymous registration). + // Intellectual Property details. Used if data is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The asset property dictionary. Properties map[string]*string + // Stage in the data lifecycle assigned to this data asset + Stage *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string } @@ -2638,6 +3630,8 @@ type DataVersionsClientListOptions struct { OrderBy *string // Continuation token for pagination. Skip *string + // data stage + Stage *string // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 Tags *string // Top count of results, top count cannot be greater than the page size. If topCount > page size, results with be default @@ -2645,6 +3639,34 @@ type DataVersionsClientListOptions struct { Top *int32 } +type DatabaseSource struct { + // REQUIRED; [Required] Specifies the type of data. + SourceType *DataImportSourceType + + // Workspace connection for data import source storage + Connection *string + + // SQL Query statement for data import Database source + Query *string + + // SQL StoredProcedure on data import Database source + StoredProcedure *string + + // SQL StoredProcedure parameters + StoredProcedureParams []map[string]*string + + // Name of the table on data import Database source + TableName *string +} + +// GetDataImportSource implements the DataImportSourceClassification interface for type DatabaseSource. +func (d *DatabaseSource) GetDataImportSource() *DataImportSource { + return &DataImportSource{ + Connection: d.Connection, + SourceType: d.SourceType, + } +} + // Databricks - A DataFactory compute. 
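// Illustrative sketch, not generated code: a DataImport that pulls from a database
// source, as permitted by the DataImportSourceClassification above. The constant names
// DataTypeMltable and DataImportSourceTypeDatabase are assumptions (check this version's
// constants.go), and the asset name, connection, query, and URI are placeholders.
func exampleDatabaseDataImport() *DataImport {
	assetName := "imported-sales-data"
	dataType := DataTypeMltable // assumed enum constant
	dataURI := "azureml://datastores/workspaceblobstore/paths/imports/sales/"
	sourceType := DataImportSourceTypeDatabase // assumed enum constant
	connection := "my-sql-workspace-connection"
	query := "SELECT * FROM dbo.Sales"
	return &DataImport{
		AssetName: &assetName,
		DataType:  &dataType,
		DataURI:   &dataURI,
		Source: &DatabaseSource{
			SourceType: &sourceType,
			Connection: &connection,
			Query:      &query,
		},
	}
}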
type Databricks struct { // REQUIRED; The type of compute @@ -2734,6 +3756,37 @@ type DatabricksSchema struct { Properties *DatabricksProperties } +type DatasetExportSummary struct { + // REQUIRED; [Required] The format of exported labels, also as the discriminator. + Format *ExportFormatType + + // READ-ONLY; The time when the export was completed. + EndDateTime *time.Time + + // READ-ONLY; The total number of labeled datapoints exported. + ExportedRowCount *int64 + + // READ-ONLY; The unique name of the labeled data asset. + LabeledAssetName *string + + // READ-ONLY; Name and identifier of the job containing exported labels. + LabelingJobID *string + + // READ-ONLY; The time when the export was requested. + StartDateTime *time.Time +} + +// GetExportSummary implements the ExportSummaryClassification interface for type DatasetExportSummary. +func (d *DatasetExportSummary) GetExportSummary() *ExportSummary { + return &ExportSummary{ + EndDateTime: d.EndDateTime, + ExportedRowCount: d.ExportedRowCount, + Format: d.Format, + LabelingJobID: d.LabelingJobID, + StartDateTime: d.StartDateTime, + } +} + // Datastore - Azure Resource Manager resource envelope. type Datastore struct { // REQUIRED; [Required] Additional attributes of the entity. @@ -2755,8 +3808,8 @@ type Datastore struct { // DatastoreCredentialsClassification provides polymorphic access to related types. // Call the interface's GetDatastoreCredentials() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: -// - *AccountKeyDatastoreCredentials, *CertificateDatastoreCredentials, *DatastoreCredentials, *NoneDatastoreCredentials, -// - *SasDatastoreCredentials, *ServicePrincipalDatastoreCredentials +// - *AccountKeyDatastoreCredentials, *CertificateDatastoreCredentials, *DatastoreCredentials, *KerberosKeytabCredentials, +// - *KerberosPasswordCredentials, *NoneDatastoreCredentials, *SasDatastoreCredentials, *ServicePrincipalDatastoreCredentials type DatastoreCredentialsClassification interface { // GetDatastoreCredentials returns the DatastoreCredentials content of the underlying type. GetDatastoreCredentials() *DatastoreCredentials @@ -2774,7 +3827,8 @@ func (d *DatastoreCredentials) GetDatastoreCredentials() *DatastoreCredentials { // DatastorePropertiesClassification provides polymorphic access to related types. // Call the interface's GetDatastoreProperties() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: -// - *AzureBlobDatastore, *AzureDataLakeGen1Datastore, *AzureDataLakeGen2Datastore, *AzureFileDatastore, *DatastoreProperties +// - *AzureBlobDatastore, *AzureDataLakeGen1Datastore, *AzureDataLakeGen2Datastore, *AzureFileDatastore, *DatastoreProperties, +// - *HdfsDatastore, *OneLakeDatastore type DatastorePropertiesClassification interface { // GetDatastoreProperties returns the DatastoreProperties content of the underlying type. GetDatastoreProperties() *DatastoreProperties @@ -2791,6 +3845,9 @@ type DatastoreProperties struct { // The asset description text. Description *string + // Intellectual Property details. + IntellectualProperty *IntellectualProperty + // The asset property dictionary. Properties map[string]*string @@ -2816,7 +3873,8 @@ type DatastoreResourceArmPaginatedResult struct { // DatastoreSecretsClassification provides polymorphic access to related types. // Call the interface's GetDatastoreSecrets() method to access the common type. // Use a type switch to determine the concrete type. 
The possible types are: -// - *AccountKeyDatastoreSecrets, *CertificateDatastoreSecrets, *DatastoreSecrets, *SasDatastoreSecrets, *ServicePrincipalDatastoreSecrets +// - *AccountKeyDatastoreSecrets, *CertificateDatastoreSecrets, *DatastoreSecrets, *KerberosKeytabSecrets, *KerberosPasswordSecrets, +// - *SasDatastoreSecrets, *ServicePrincipalDatastoreSecrets type DatastoreSecretsClassification interface { // GetDatastoreSecrets returns the DatastoreSecrets content of the underlying type. GetDatastoreSecrets() *DatastoreSecrets @@ -2902,6 +3960,13 @@ type DeploymentResourceConfiguration struct { // Optional type of VM used as supported by the compute target. InstanceType *string + // Locations where the job can run. + Locations []*string + + // Optional max allowed number of instances or nodes to be used by the compute target. For use with elastic training, currently + // supported by PyTorch distribution type only. + MaxInstanceCount *int32 + // Additional properties bag. Properties map[string]any } @@ -2965,14 +4030,13 @@ type DiagnoseResult struct { // DiagnoseWorkspaceParameters - Parameters to diagnose a workspace type DiagnoseWorkspaceParameters struct { - // Value of Parameters Value *DiagnoseRequestProperties } // DistributionConfigurationClassification provides polymorphic access to related types. // Call the interface's GetDistributionConfiguration() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: -// - *DistributionConfiguration, *Mpi, *PyTorch, *TensorFlow +// - *DistributionConfiguration, *Mpi, *PyTorch, *Ray, *TensorFlow type DistributionConfigurationClassification interface { // GetDistributionConfiguration returns the DistributionConfiguration content of the underlying type. GetDistributionConfiguration() *DistributionConfiguration @@ -2989,6 +4053,14 @@ func (d *DistributionConfiguration) GetDistributionConfiguration() *Distribution return d } +type Docker struct { + // OPTIONAL; Contains additional key/value pairs not defined in the schema. + AdditionalProperties map[string]any + + // Indicate whether container shall run in privileged or non-privileged mode. + Privileged *bool +} + // EarlyTerminationPolicyClassification provides polymorphic access to related types. // Call the interface's GetEarlyTerminationPolicy() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: @@ -3013,26 +4085,67 @@ type EarlyTerminationPolicy struct { // GetEarlyTerminationPolicy implements the EarlyTerminationPolicyClassification interface for type EarlyTerminationPolicy. func (e *EarlyTerminationPolicy) GetEarlyTerminationPolicy() *EarlyTerminationPolicy { return e } -type EncryptionKeyVaultProperties struct { - // REQUIRED; Key vault uri to access the encryption key. - KeyIdentifier *string +type EmailMonitoringAlertNotificationSettings struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + AlertNotificationType *MonitoringAlertNotificationType - // REQUIRED; The ArmId of the keyVault where the customer owned encryption key is present. - KeyVaultArmID *string + // Configuration for notification. + EmailNotificationSetting *NotificationSetting +} - // For future use - The client id of the identity which will be used to access key vault. - IdentityClientID *string +// GetMonitoringAlertNotificationSettingsBase implements the MonitoringAlertNotificationSettingsBaseClassification interface +// for type EmailMonitoringAlertNotificationSettings. 
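// Illustrative sketch, not generated code: wiring e-mail alerting into a monitoring
// configuration with the EmailMonitoringAlertNotificationSettings type above. The
// constant name MonitoringAlertNotificationTypeEmail is an assumption, and the fields
// of NotificationSetting (recipients, etc.) are not shown in this hunk, so the caller
// supplies that value.
func exampleEmailAlerts(recipients *NotificationSetting) *EmailMonitoringAlertNotificationSettings {
	alertType := MonitoringAlertNotificationTypeEmail // assumed enum constant
	return &EmailMonitoringAlertNotificationSettings{
		AlertNotificationType:    &alertType,
		EmailNotificationSetting: recipients,
	}
}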
+func (e *EmailMonitoringAlertNotificationSettings) GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase { + return &MonitoringAlertNotificationSettingsBase{ + AlertNotificationType: e.AlertNotificationType, + } +} + +type EncryptionKeyVaultUpdateProperties struct { + // REQUIRED + KeyIdentifier *string } type EncryptionProperty struct { - // REQUIRED; Customer Key vault properties. - KeyVaultProperties *EncryptionKeyVaultProperties + // REQUIRED; KeyVault details to do the encryption + KeyVaultProperties *KeyVaultProperties // REQUIRED; Indicates whether or not the encryption is enabled for the workspace. Status *EncryptionStatus - // The identity that will be used to access the key vault for encryption at rest. + // The byok cosmosdb account that customer brings to store customer's data with encryption + CosmosDbResourceID *string + + // Identity to be used with the keyVault Identity *IdentityForCmk + + // The byok search account that customer brings to store customer's data with encryption + SearchAccountResourceID *string + + // The byok storage account that customer brings to store customer's data with encryption + StorageAccountResourceID *string +} + +type EncryptionUpdateProperties struct { + // REQUIRED + KeyVaultProperties *EncryptionKeyVaultUpdateProperties +} + +type Endpoint struct { + // Host IP over which the application is exposed from the container + HostIP *string + + // Name of the Endpoint + Name *string + + // Protocol over which communication will happen over this endpoint + Protocol *Protocol + + // Port over which the application is exposed from container. + Published *int32 + + // Application port inside the container. + Target *int32 } // EndpointAuthKeys - Keys for endpoint authentication. @@ -3067,7 +4180,7 @@ type EndpointDeploymentPropertiesBase struct { // Description of the endpoint deployment. Description *string - // ARM resource ID or AssetId of the environment specification for the endpoint deployment. + // ARM resource ID of the environment specification for the endpoint deployment. EnvironmentID *string // Environment variables configuration for the deployment. @@ -3152,6 +4265,9 @@ type EnvironmentContainerProperties struct { // READ-ONLY; The next auto incremental version NextVersion *string + + // READ-ONLY; Provisioning state for the environment container. + ProvisioningState *AssetProvisioningState } // EnvironmentContainerResourceArmPaginatedResult - A paginated list of EnvironmentContainer entities. @@ -3188,6 +4304,17 @@ type EnvironmentContainersClientListOptions struct { Skip *string } +type EnvironmentVariable struct { + // OPTIONAL; Contains additional key/value pairs not defined in the schema. + AdditionalProperties map[string]any + + // Type of the Environment Variable. Possible values are: local - For local variable + Type *EnvironmentVariableType + + // Value of the Environment variable + Value *string +} + // EnvironmentVersion - Azure Resource Manager resource envelope. type EnvironmentVersion struct { // REQUIRED; [Required] Additional attributes of the entity. @@ -3208,6 +4335,9 @@ type EnvironmentVersion struct { // EnvironmentVersionProperties - Environment version details. type EnvironmentVersionProperties struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // Defines if image needs to be rebuilt based on base image changes. 
AutoRebuild *AutoRebuildSetting @@ -3226,10 +4356,14 @@ type EnvironmentVersionProperties struct { // Defines configuration specific to inference. InferenceConfig *InferenceContainerProperties - // If the name version are system generated (anonymous registration). + // Intellectual Property details. Used if environment is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The OS type of the environment. @@ -3238,11 +4372,17 @@ type EnvironmentVersionProperties struct { // The asset property dictionary. Properties map[string]*string + // Stage in the environment lifecycle assigned to this environment + Stage *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string // READ-ONLY; Environment type is either user managed or curated by the Azure ML service EnvironmentType *EnvironmentType + + // READ-ONLY; Provisioning state for the environment version. + ProvisioningState *AssetProvisioningState } // EnvironmentVersionResourceArmPaginatedResult - A paginated list of EnvironmentVersion entities. @@ -3278,6 +4418,8 @@ type EnvironmentVersionsClientListOptions struct { OrderBy *string // Continuation token for pagination. Skip *string + // Stage for including/excluding (for example) archived entities. Takes priority over listViewType + Stage *string // Maximum number of records to return. Top *int32 } @@ -3340,8 +4482,37 @@ type EstimatedVMPrices struct { Values []*EstimatedVMPrice } +// ExportSummaryClassification provides polymorphic access to related types. +// Call the interface's GetExportSummary() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CSVExportSummary, *CocoExportSummary, *DatasetExportSummary, *ExportSummary +type ExportSummaryClassification interface { + // GetExportSummary returns the ExportSummary content of the underlying type. + GetExportSummary() *ExportSummary +} + +type ExportSummary struct { + // REQUIRED; [Required] The format of exported labels, also as the discriminator. + Format *ExportFormatType + + // READ-ONLY; The time when the export was completed. + EndDateTime *time.Time + + // READ-ONLY; The total number of labeled datapoints exported. + ExportedRowCount *int64 + + // READ-ONLY; Name and identifier of the job containing exported labels. + LabelingJobID *string + + // READ-ONLY; The time when the export was requested. + StartDateTime *time.Time +} + +// GetExportSummary implements the ExportSummaryClassification interface for type ExportSummary. +func (e *ExportSummary) GetExportSummary() *ExportSummary { return e } + type ExternalFQDNResponse struct { - Value []*FQDNEndpoints + Value []*FQDNEndpointsPropertyBag } type FQDNEndpoint struct { @@ -3354,1658 +4525,4581 @@ type FQDNEndpointDetail struct { } type FQDNEndpoints struct { - Properties *FQDNEndpointsProperties -} - -type FQDNEndpointsProperties struct { Category *string Endpoints []*FQDNEndpoint } -// FeaturizationSettings - Featurization Configuration. -type FeaturizationSettings struct { - // Dataset language, useful for the text data. 
- DatasetLanguage *string +// FQDNEndpointsPropertyBag - Property bag for FQDN endpoints result +type FQDNEndpointsPropertyBag struct { + Properties *FQDNEndpoints } -type FlavorData struct { - // Model flavor-specific data. - Data map[string]*string -} +// Feature - Azure Resource Manager resource envelope. +type Feature struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *FeatureProperties -// ForecastHorizonClassification provides polymorphic access to related types. -// Call the interface's GetForecastHorizon() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *AutoForecastHorizon, *CustomForecastHorizon, *ForecastHorizon -type ForecastHorizonClassification interface { - // GetForecastHorizon returns the ForecastHorizon content of the underlying type. - GetForecastHorizon() *ForecastHorizon -} + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string -// ForecastHorizon - The desired maximum forecast horizon in units of time-series frequency. -type ForecastHorizon struct { - // REQUIRED; [Required] Set forecast horizon value selection mode. - Mode *ForecastHorizonMode -} - -// GetForecastHorizon implements the ForecastHorizonClassification interface for type ForecastHorizon. -func (f *ForecastHorizon) GetForecastHorizon() *ForecastHorizon { return f } + // READ-ONLY; The name of the resource + Name *string -// Forecasting task in AutoML Table vertical. -type Forecasting struct { - // REQUIRED; [Required] Task type for AutoMLJob. - TaskType *TaskType + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData - // REQUIRED; [Required] Training data input. - TrainingData *MLTableJobInput + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} - // Columns to use for CVSplit data. - CvSplitColumnNames []*string +type FeatureAttributionDriftMonitoringSignal struct { + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + MetricThreshold *FeatureAttributionMetricThreshold - // Featurization inputs needed for AutoML job. - FeaturizationSettings *TableVerticalFeaturizationSettings + // REQUIRED; [Required] The data which drift will be calculated for. + ProductionData []MonitoringInputDataBaseClassification - // Forecasting task specific inputs. - ForecastingSettings *ForecastingSettings + // REQUIRED; [Required] The data to calculate drift against. + ReferenceData MonitoringInputDataBaseClassification - // Execution constraints for AutoMLJob. - LimitSettings *TableVerticalLimitSettings + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType - // Log verbosity for the job. - LogVerbosity *LogVerbosity + // The current notification mode for this signal. + Mode *MonitoringNotificationMode - // Number of cross validation folds to be applied on training dataset when validation dataset is not provided. - NCrossValidations NCrossValidationsClassification + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string +} - // Primary metric for forecasting task. 
- PrimaryMetric *ForecastingPrimaryMetrics +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type FeatureAttributionDriftMonitoringSignal. +func (f *FeatureAttributionDriftMonitoringSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: f.Mode, + Properties: f.Properties, + SignalType: f.SignalType, + } +} - // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. - TargetColumnName *string +type FeatureAttributionMetricThreshold struct { + // REQUIRED; [Required] The feature attribution metric to calculate. + Metric *FeatureAttributionMetric - // Test data input. - TestData *MLTableJobInput + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} - // The fraction of test dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied when - // validation dataset is not provided. - TestDataSize *float64 +// FeatureProperties - Dto object representing feature +type FeatureProperties struct { + // Specifies type + DataType *FeatureDataType - // Inputs for training phase for an AutoML Job. - TrainingSettings *ForecastingTrainingSettings + // The asset description text. + Description *string - // Validation data inputs. - ValidationData *MLTableJobInput + // Specifies name + FeatureName *string - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. - ValidationDataSize *float64 + // The asset property dictionary. + Properties map[string]*string - // The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to - // be weighted up or down. - WeightColumnName *string + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string } -// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type Forecasting. -func (f *Forecasting) GetAutoMLVertical() *AutoMLVertical { - return &AutoMLVertical{ - LogVerbosity: f.LogVerbosity, - TargetColumnName: f.TargetColumnName, - TaskType: f.TaskType, - TrainingData: f.TrainingData, - } +// FeatureResourceArmPaginatedResult - A paginated list of Feature entities. +type FeatureResourceArmPaginatedResult struct { + // The link to the next page of Feature objects. If null, there are no additional pages. + NextLink *string + + // An array of objects of type Feature. + Value []*Feature } -// ForecastingSettings - Forecasting specific parameters. -type ForecastingSettings struct { - // Country or region for holidays for forecasting tasks. These should be ISO 3166 two-letter country/region codes, for example - // 'US' or 'GB'. - CountryOrRegionForHolidays *string +type FeatureStoreSettings struct { + ComputeRuntime *ComputeRuntimeDto + OfflineStoreConnectionName *string + OnlineStoreConnectionName *string +} - // Number of periods between the origin time of one CV fold and the next fold. For example, if CVStepSize = 3 for daily data, - // the origin time for each fold will be three days apart. - CvStepSize *int32 +type FeatureSubset struct { + // REQUIRED; [Required] The list of features to include. + Features []*string - // Flag for generating lags for the numeric features with 'auto' or null. 
- FeatureLags *FeatureLags + // REQUIRED; [Required] Specifies the feature filter to leverage when selecting features to calculate metrics over. + FilterType *MonitoringFeatureFilterType +} - // The desired maximum forecast horizon in units of time-series frequency. - ForecastHorizon ForecastHorizonClassification +// GetMonitoringFeatureFilterBase implements the MonitoringFeatureFilterBaseClassification interface for type FeatureSubset. +func (f *FeatureSubset) GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase { + return &MonitoringFeatureFilterBase{ + FilterType: f.FilterType, + } +} - // When forecasting, this parameter represents the period with which the forecast is desired, for example daily, weekly, yearly, - // etc. The forecast frequency is dataset frequency by default. - Frequency *string +// FeatureWindow - Specifies the feature window +type FeatureWindow struct { + // Specifies the feature window end time + FeatureWindowEnd *time.Time - // Set time series seasonality as an integer multiple of the series frequency. If seasonality is set to 'auto', it will be - // inferred. - Seasonality SeasonalityClassification + // Specifies the feature window start time + FeatureWindowStart *time.Time +} - // The parameter defining how if AutoML should handle short time series. - ShortSeriesHandlingConfig *ShortSeriesHandlingConfiguration +// FeaturesClientGetOptions contains the optional parameters for the FeaturesClient.Get method. +type FeaturesClientGetOptions struct { + // placeholder for future optional parameters +} - // The function to be used to aggregate the time series target column to conform to a user specified frequency. If the TargetAggregateFunction - // is set i.e. not 'None', but the freq parameter is not set, - // the error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean". - TargetAggregateFunction *TargetAggregationFunction +// FeaturesClientListOptions contains the optional parameters for the FeaturesClient.NewListPager method. +type FeaturesClientListOptions struct { + // Description of the featureset. + Description *string + // feature name. + FeatureName *string + // Continuation token for pagination. + Skip *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string +} - // The number of past periods to lag from the target column. - TargetLags TargetLagsClassification +// FeaturesetContainer - Azure Resource Manager resource envelope. +type FeaturesetContainer struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *FeaturesetContainerProperties - // The number of past periods used to create a rolling window average of the target column. - TargetRollingWindowSize TargetRollingWindowSizeClassification + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string - // The name of the time column. This parameter is required when forecasting to specify the datetime column in the input data - // used for building the time series and inferring its frequency. - TimeColumnName *string + // READ-ONLY; The name of the resource + Name *string - // The names of columns used to group a timeseries. It can be used to create multiple series. If grain is not defined, the - // data set is assumed to be one time-series. This parameter is used with task type - // forecasting. 
- TimeSeriesIDColumnNames []*string + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData - // Configure STL Decomposition of the time-series target column. - UseStl *UseStl + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string } -// ForecastingTrainingSettings - Forecasting Training related configuration. -type ForecastingTrainingSettings struct { - // Allowed models for forecasting task. - AllowedTrainingAlgorithms []*ForecastingModels +// FeaturesetContainerProperties - Dto object representing feature set +type FeaturesetContainerProperties struct { + // The asset description text. + Description *string - // Blocked models for forecasting task. - BlockedTrainingAlgorithms []*ForecastingModels + // Is the asset archived? + IsArchived *bool - // Enable recommendation of DNN models. - EnableDnnTraining *bool + // The asset property dictionary. + Properties map[string]*string - // Flag to turn on explainability on best model. - EnableModelExplainability *bool + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string - // Flag for enabling onnx compatible models. - EnableOnnxCompatibleModels *bool + // READ-ONLY; The latest version inside this container. + LatestVersion *string - // Enable stack ensemble run. - EnableStackEnsemble *bool + // READ-ONLY; The next auto incremental version + NextVersion *string - // Enable voting ensemble run. - EnableVoteEnsemble *bool + // READ-ONLY; Provisioning state for the featureset container. + ProvisioningState *AssetProvisioningState +} - // During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. - // Configure this parameter with a higher value than 300 secs, if more time - // is needed. - EnsembleModelDownloadTimeout *string +// FeaturesetContainerResourceArmPaginatedResult - A paginated list of FeaturesetContainer entities. +type FeaturesetContainerResourceArmPaginatedResult struct { + // The link to the next page of FeaturesetContainer objects. If null, there are no additional pages. + NextLink *string - // Stack ensemble settings for stack ensemble run. - StackEnsembleSettings *StackEnsembleSettings + // An array of objects of type FeaturesetContainer. + Value []*FeaturesetContainer } -// GridSamplingAlgorithm - Defines a Sampling Algorithm that exhaustively generates every value combination in the space -type GridSamplingAlgorithm struct { - // REQUIRED; [Required] The algorithm used for generating hyperparameter values, along with configuration properties - SamplingAlgorithmType *SamplingAlgorithmType +// FeaturesetContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturesetContainersClient.BeginCreateOrUpdate +// method. +type FeaturesetContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// GetSamplingAlgorithm implements the SamplingAlgorithmClassification interface for type GridSamplingAlgorithm. -func (g *GridSamplingAlgorithm) GetSamplingAlgorithm() *SamplingAlgorithm { - return &SamplingAlgorithm{ - SamplingAlgorithmType: g.SamplingAlgorithmType, - } +// FeaturesetContainersClientBeginDeleteOptions contains the optional parameters for the FeaturesetContainersClient.BeginDelete +// method. 
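// Illustrative sketch, not generated code: paging through featureset containers with
// the list options defined nearby. It assumes the generated FeaturesetContainersClient
// exposes NewListPager(resourceGroupName, workspaceName, options) in the usual ARM
// pattern, that each page's Value field is []*FeaturesetContainer, and that the
// ListViewTypeActiveOnly constant exists; "context" and "fmt" imports are assumed.
func exampleListFeaturesetContainers(ctx context.Context, client *FeaturesetContainersClient) error {
	listViewType := ListViewTypeActiveOnly // assumed enum constant
	pageSize := int32(20)
	pager := client.NewListPager("my-resource-group", "my-workspace", &FeaturesetContainersClientListOptions{
		ListViewType: &listViewType,
		PageSize:     &pageSize,
	})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, container := range page.Value {
			if container.Name != nil {
				fmt.Println(*container.Name)
			}
		}
	}
	return nil
}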
+type FeaturesetContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// HDInsight - A HDInsight compute. -type HDInsight struct { - // REQUIRED; The type of compute - ComputeType *ComputeType - - // Location for the underlying compute - ComputeLocation *string +// FeaturesetContainersClientGetEntityOptions contains the optional parameters for the FeaturesetContainersClient.GetEntity +// method. +type FeaturesetContainersClientGetEntityOptions struct { + // placeholder for future optional parameters +} - // The description of the Machine Learning compute. +// FeaturesetContainersClientListOptions contains the optional parameters for the FeaturesetContainersClient.NewListPager +// method. +type FeaturesetContainersClientListOptions struct { + // createdBy user name + CreatedBy *string + // description for the feature set Description *string + // [ListViewType.ActiveOnly, ListViewType.ArchivedOnly, ListViewType.All]View type for including/excluding (for example) archived + // entities. + ListViewType *ListViewType + // name for the featureset + Name *string + // page size + PageSize *int32 + // Continuation token for pagination. + Skip *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string +} - // Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication. - DisableLocalAuth *bool +// FeaturesetJob - Dto object representing the feature set job +type FeaturesetJob struct { + // Specifies the created date + CreatedDate *time.Time - // HDInsight compute properties - Properties *HDInsightProperties + // Specifies the display name + DisplayName *string - // ARM resource id of the underlying compute - ResourceID *string + // Specifies the duration + Duration *string - // READ-ONLY; The time at which the compute was created. - CreatedOn *time.Time + // Specifies the experiment id + ExperimentID *string - // READ-ONLY; Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning - // service provisioned it if false. - IsAttachedCompute *bool + // Specifies the backfill feature window to be materialized + FeatureWindow *FeatureWindow - // READ-ONLY; The time at which the compute was last modified. - ModifiedOn *time.Time + // Specifies the job id + JobID *string - // READ-ONLY; Errors during provisioning - ProvisioningErrors []*ErrorResponse + // Specifies the job status + Status *JobStatus - // READ-ONLY; The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed. - ProvisioningState *ProvisioningState -} + // Specifies the tags if any + Tags map[string]*string -// GetCompute implements the ComputeClassification interface for type HDInsight. -func (h *HDInsight) GetCompute() *Compute { - return &Compute{ - ComputeType: h.ComputeType, - ComputeLocation: h.ComputeLocation, - ProvisioningState: h.ProvisioningState, - Description: h.Description, - CreatedOn: h.CreatedOn, - ModifiedOn: h.ModifiedOn, - ResourceID: h.ResourceID, - ProvisioningErrors: h.ProvisioningErrors, - IsAttachedCompute: h.IsAttachedCompute, - DisableLocalAuth: h.DisableLocalAuth, - } + // Specifies the feature store job type + Type *FeaturestoreJobType } -// HDInsightProperties - HDInsight compute properties -type HDInsightProperties struct { - // Public IP address of the master node of the cluster. 
- Address *string - - // Admin credentials for master node of the cluster - AdministratorAccount *VirtualMachineSSHCredentials +// FeaturesetJobArmPaginatedResult - A paginated list of FeaturesetJob entities. +type FeaturesetJobArmPaginatedResult struct { + // The link to the next page of FeaturesetJob objects. If null, there are no additional pages. + NextLink *string - // Port open for ssh connections on the master node of the cluster. - SSHPort *int32 + // An array of objects of type FeaturesetJob. + Value []*FeaturesetJob } -type HDInsightSchema struct { - // HDInsight compute properties - Properties *HDInsightProperties +// FeaturesetSpecification - Dto object representing specification +type FeaturesetSpecification struct { + // Specifies the spec path + Path *string } -// IDAssetReference - Reference to an asset via its ARM resource ID. -type IDAssetReference struct { - // REQUIRED; [Required] ARM resource ID of the asset. - AssetID *string +// FeaturesetVersion - Azure Resource Manager resource envelope. +type FeaturesetVersion struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *FeaturesetVersionProperties - // REQUIRED; [Required] Specifies the type of asset reference. - ReferenceType *ReferenceType -} + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string -// GetAssetReferenceBase implements the AssetReferenceBaseClassification interface for type IDAssetReference. -func (i *IDAssetReference) GetAssetReferenceBase() *AssetReferenceBase { - return &AssetReferenceBase{ - ReferenceType: i.ReferenceType, - } -} + // READ-ONLY; The name of the resource + Name *string -// IdentityConfigurationClassification provides polymorphic access to related types. -// Call the interface's GetIdentityConfiguration() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *AmlToken, *IdentityConfiguration, *ManagedIdentity, *UserIdentity -type IdentityConfigurationClassification interface { - // GetIdentityConfiguration returns the IdentityConfiguration content of the underlying type. - GetIdentityConfiguration() *IdentityConfiguration -} + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData -// IdentityConfiguration - Base definition for identity configuration. -type IdentityConfiguration struct { - // REQUIRED; [Required] Specifies the type of identity framework. - IdentityType *IdentityConfigurationType + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string } -// GetIdentityConfiguration implements the IdentityConfigurationClassification interface for type IdentityConfiguration. 
-func (i *IdentityConfiguration) GetIdentityConfiguration() *IdentityConfiguration { return i } +// FeaturesetVersionBackfillRequest - Request payload for creating a backfill request for a given feature set version +type FeaturesetVersionBackfillRequest struct { + // Specifies description + Description *string -// IdentityForCmk - Identity that will be used to access key vault for encryption at rest -type IdentityForCmk struct { - // The ArmId of the user assigned identity that will be used to access the customer managed key vault - UserAssignedIdentity *string + // Specifies description + DisplayName *string + + // Specifies the backfill feature window to be materialized + FeatureWindow *FeatureWindow + + // Specifies the compute resource settings + Resource *MaterializationComputeResource + + // Specifies the spark compute settings + SparkConfiguration map[string]*string + + // Specifies the tags + Tags map[string]*string } -// ImageClassification - Image Classification. Multi-class image classification is used when an image is classified with only -// a single label from a set of classes - e.g. each image is classified as either an image of a 'cat' -// or a 'dog' or a 'duck'. -type ImageClassification struct { - // REQUIRED; [Required] Limit settings for the AutoML job. - LimitSettings *ImageLimitSettings +// FeaturesetVersionProperties - Dto object representing feature set version +type FeaturesetVersionProperties struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting - // REQUIRED; [Required] Task type for AutoMLJob. - TaskType *TaskType + // The asset description text. + Description *string - // REQUIRED; [Required] Training data input. - TrainingData *MLTableJobInput + // Specifies list of entities + Entities []*string - // Log verbosity for the job. - LogVerbosity *LogVerbosity + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous + IsAnonymous *bool - // Settings used for training the model. - ModelSettings *ImageModelSettingsClassification + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived + IsArchived *bool - // Primary metric to optimize for this task. - PrimaryMetric *ClassificationPrimaryMetrics + // Specifies the materialization settings + MaterializationSettings *MaterializationSettings - // Search space for sampling different combinations of models and their hyperparameters. - SearchSpace []*ImageModelDistributionSettingsClassification + // The asset property dictionary. + Properties map[string]*string - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings + // Specifies the feature spec details + Specification *FeaturesetSpecification - // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. - TargetColumnName *string + // Specifies the asset stage + Stage *string - // Validation data inputs. - ValidationData *MLTableJobInput + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. - ValidationDataSize *float64 + // READ-ONLY; Provisioning state for the featureset version container. 
+ ProvisioningState *AssetProvisioningState } -// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageClassification. -func (i *ImageClassification) GetAutoMLVertical() *AutoMLVertical { - return &AutoMLVertical{ - LogVerbosity: i.LogVerbosity, - TargetColumnName: i.TargetColumnName, - TaskType: i.TaskType, - TrainingData: i.TrainingData, - } +// FeaturesetVersionResourceArmPaginatedResult - A paginated list of FeaturesetVersion entities. +type FeaturesetVersionResourceArmPaginatedResult struct { + // The link to the next page of FeaturesetVersion objects. If null, there are no additional pages. + NextLink *string + + // An array of objects of type FeaturesetVersion. + Value []*FeaturesetVersion } -type ImageClassificationBase struct { - // REQUIRED; [Required] Limit settings for the AutoML job. - LimitSettings *ImageLimitSettings +// FeaturesetVersionsClientBeginBackfillOptions contains the optional parameters for the FeaturesetVersionsClient.BeginBackfill +// method. +type FeaturesetVersionsClientBeginBackfillOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Settings used for training the model. - ModelSettings *ImageModelSettingsClassification +// FeaturesetVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturesetVersionsClient.BeginCreateOrUpdate +// method. +type FeaturesetVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Search space for sampling different combinations of models and their hyperparameters. - SearchSpace []*ImageModelDistributionSettingsClassification +// FeaturesetVersionsClientBeginDeleteOptions contains the optional parameters for the FeaturesetVersionsClient.BeginDelete +// method. +type FeaturesetVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings +// FeaturesetVersionsClientGetOptions contains the optional parameters for the FeaturesetVersionsClient.Get method. +type FeaturesetVersionsClientGetOptions struct { + // placeholder for future optional parameters +} - // Validation data inputs. - ValidationData *MLTableJobInput +// FeaturesetVersionsClientListMaterializationJobsOptions contains the optional parameters for the FeaturesetVersionsClient.NewListMaterializationJobsPager +// method. +type FeaturesetVersionsClientListMaterializationJobsOptions struct { + // End time of the feature window to filter materialization jobs. + FeatureWindowEnd *string + // Start time of the feature window to filter materialization jobs. + FeatureWindowStart *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Filters *string + // Continuation token for pagination. + Skip *string +} - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. - ValidationDataSize *float64 +// FeaturesetVersionsClientListOptions contains the optional parameters for the FeaturesetVersionsClient.NewListPager method. 
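// Illustrative sketch, not generated code: a backfill request that materializes one day
// of feature data, built from the FeaturesetVersionBackfillRequest and FeatureWindow
// types above. Only fields shown in this hunk are populated, the window values are
// placeholders, and the "time" import is assumed; compute and spark settings are left
// to the caller.
func exampleBackfillRequest() FeaturesetVersionBackfillRequest {
	displayName := "nightly-backfill"
	windowStart := time.Date(2023, time.June, 1, 0, 0, 0, 0, time.UTC)
	windowEnd := windowStart.Add(24 * time.Hour)
	trigger := "manual"
	return FeaturesetVersionBackfillRequest{
		DisplayName: &displayName,
		FeatureWindow: &FeatureWindow{
			FeatureWindowStart: &windowStart,
			FeatureWindowEnd:   &windowEnd,
		},
		Tags: map[string]*string{"trigger": &trigger},
	}
}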
+type FeaturesetVersionsClientListOptions struct { + // createdBy user name + CreatedBy *string + // description for the feature set version + Description *string + // [ListViewType.ActiveOnly, ListViewType.ArchivedOnly, ListViewType.All]View type for including/excluding (for example) archived + // entities. + ListViewType *ListViewType + // page size + PageSize *int32 + // Continuation token for pagination. + Skip *string + // Specifies the featurestore stage + Stage *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string + // featureset version + Version *string + // name for the featureset version + VersionName *string } -// ImageClassificationMultilabel - Image Classification Multilabel. Multi-label image classification is used when an image -// could have one or more labels from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'. -type ImageClassificationMultilabel struct { - // REQUIRED; [Required] Limit settings for the AutoML job. - LimitSettings *ImageLimitSettings +// FeaturestoreEntityContainer - Azure Resource Manager resource envelope. +type FeaturestoreEntityContainer struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *FeaturestoreEntityContainerProperties - // REQUIRED; [Required] Task type for AutoMLJob. - TaskType *TaskType + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string - // REQUIRED; [Required] Training data input. - TrainingData *MLTableJobInput + // READ-ONLY; The name of the resource + Name *string - // Log verbosity for the job. - LogVerbosity *LogVerbosity + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData - // Settings used for training the model. - ModelSettings *ImageModelSettingsClassification + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} - // Primary metric to optimize for this task. - PrimaryMetric *ClassificationMultilabelPrimaryMetrics +// FeaturestoreEntityContainerProperties - Dto object representing feature entity +type FeaturestoreEntityContainerProperties struct { + // The asset description text. + Description *string - // Search space for sampling different combinations of models and their hyperparameters. - SearchSpace []*ImageModelDistributionSettingsClassification + // Is the asset archived? + IsArchived *bool - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings + // The asset property dictionary. + Properties map[string]*string - // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. - TargetColumnName *string + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string - // Validation data inputs. - ValidationData *MLTableJobInput + // READ-ONLY; The latest version inside this container. + LatestVersion *string - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. 
- ValidationDataSize *float64 -} + // READ-ONLY; The next auto incremental version + NextVersion *string -// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageClassificationMultilabel. -func (i *ImageClassificationMultilabel) GetAutoMLVertical() *AutoMLVertical { - return &AutoMLVertical{ - LogVerbosity: i.LogVerbosity, - TargetColumnName: i.TargetColumnName, - TaskType: i.TaskType, - TrainingData: i.TrainingData, - } + // READ-ONLY; Provisioning state for the featurestore entity container. + ProvisioningState *AssetProvisioningState } -// ImageInstanceSegmentation - Image Instance Segmentation. Instance segmentation is used to identify objects in an image -// at the pixel level, drawing a polygon around each object in the image. -type ImageInstanceSegmentation struct { - // REQUIRED; [Required] Limit settings for the AutoML job. - LimitSettings *ImageLimitSettings +// FeaturestoreEntityContainerResourceArmPaginatedResult - A paginated list of FeaturestoreEntityContainer entities. +type FeaturestoreEntityContainerResourceArmPaginatedResult struct { + // The link to the next page of FeaturestoreEntityContainer objects. If null, there are no additional pages. + NextLink *string - // REQUIRED; [Required] Task type for AutoMLJob. - TaskType *TaskType + // An array of objects of type FeaturestoreEntityContainer. + Value []*FeaturestoreEntityContainer +} - // REQUIRED; [Required] Training data input. - TrainingData *MLTableJobInput +// FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturestoreEntityContainersClient.BeginCreateOrUpdate +// method. +type FeaturestoreEntityContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Log verbosity for the job. - LogVerbosity *LogVerbosity +// FeaturestoreEntityContainersClientBeginDeleteOptions contains the optional parameters for the FeaturestoreEntityContainersClient.BeginDelete +// method. +type FeaturestoreEntityContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Settings used for training the model. - ModelSettings *ImageModelSettingsObjectDetection +// FeaturestoreEntityContainersClientGetEntityOptions contains the optional parameters for the FeaturestoreEntityContainersClient.GetEntity +// method. +type FeaturestoreEntityContainersClientGetEntityOptions struct { + // placeholder for future optional parameters +} - // Primary metric to optimize for this task. - PrimaryMetric *InstanceSegmentationPrimaryMetrics +// FeaturestoreEntityContainersClientListOptions contains the optional parameters for the FeaturestoreEntityContainersClient.NewListPager +// method. +type FeaturestoreEntityContainersClientListOptions struct { + // createdBy user name + CreatedBy *string + // description for the featurestore entity + Description *string + // [ListViewType.ActiveOnly, ListViewType.ArchivedOnly, ListViewType.All]View type for including/excluding (for example) archived + // entities. + ListViewType *ListViewType + // name for the featurestore entity + Name *string + // page size + PageSize *int32 + // Continuation token for pagination. + Skip *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string +} - // Search space for sampling different combinations of models and their hyperparameters. 
- SearchSpace []*ImageModelDistributionSettingsObjectDetection +// FeaturestoreEntityVersion - Azure Resource Manager resource envelope. +type FeaturestoreEntityVersion struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *FeaturestoreEntityVersionProperties - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string - // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. - TargetColumnName *string + // READ-ONLY; The name of the resource + Name *string - // Validation data inputs. - ValidationData *MLTableJobInput + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. - ValidationDataSize *float64 + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string } -// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageInstanceSegmentation. -func (i *ImageInstanceSegmentation) GetAutoMLVertical() *AutoMLVertical { - return &AutoMLVertical{ - LogVerbosity: i.LogVerbosity, - TargetColumnName: i.TargetColumnName, - TaskType: i.TaskType, - TrainingData: i.TrainingData, - } -} +// FeaturestoreEntityVersionProperties - Dto object representing feature entity version +type FeaturestoreEntityVersionProperties struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting -// ImageLimitSettings - Limit settings for the AutoML job. -type ImageLimitSettings struct { - // Maximum number of concurrent AutoML iterations. - MaxConcurrentTrials *int32 + // The asset description text. + Description *string - // Maximum number of AutoML iterations. - MaxTrials *int32 + // Specifies index columns + IndexColumns []*IndexColumn - // AutoML job timeout. - Timeout *string -} + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous + IsAnonymous *bool -// ImageModelDistributionSettings - Distribution expressions to sweep over values of model settings.Some examples are:ModelName -// = "choice('seresnext', 'resnest50')"; LearningRate = "uniform(0.001, 0.01)"; LayersToFreeze = "choice(0, -// 2)";All distributions can be specified as distribution_name(min, max) or choice(val1, val2, …, valn) where distribution -// name can be: uniform, quniform, loguniform, etc For more details on how to -// compose distribution expressions please check the documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters -// For more information on the available settings -// please visit the official documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. -type ImageModelDistributionSettings struct { - // Enable AMSGrad when optimizer is 'adam' or 'adamw'. - AmsGradient *string + // Is the asset archived? 
For types where Stage is defined, when Stage is provided it will be used to populate IsArchived + IsArchived *bool - // Settings for using Augmentations. - Augmentations *string + // The asset property dictionary. + Properties map[string]*string - // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta1 *string + // Specifies the asset stage + Stage *string - // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta2 *string + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string - // Whether to use distributer training. - Distributed *string + // READ-ONLY; Provisioning state for the featurestore entity version. + ProvisioningState *AssetProvisioningState +} - // Enable early stopping logic during training. - EarlyStopping *string +// FeaturestoreEntityVersionResourceArmPaginatedResult - A paginated list of FeaturestoreEntityVersion entities. +type FeaturestoreEntityVersionResourceArmPaginatedResult struct { + // The link to the next page of FeaturestoreEntityVersion objects. If null, there are no additional pages. + NextLink *string - // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. - // Must be a positive integer. - EarlyStoppingDelay *string + // An array of objects of type FeaturestoreEntityVersion. + Value []*FeaturestoreEntityVersion +} - // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be - // a positive integer. - EarlyStoppingPatience *string +// FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.BeginCreateOrUpdate +// method. +type FeaturestoreEntityVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Enable normalization when exporting ONNX model. - EnableOnnxNormalization *string +// FeaturestoreEntityVersionsClientBeginDeleteOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.BeginDelete +// method. +type FeaturestoreEntityVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. - EvaluationFrequency *string +// FeaturestoreEntityVersionsClientGetOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.Get +// method. +type FeaturestoreEntityVersionsClientGetOptions struct { + // placeholder for future optional parameters +} - // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights - // while accumulating the gradients of those steps, and then using the - // accumulated gradients to compute the weight updates. Must be a positive integer. - GradientAccumulationStep *string +// FeaturestoreEntityVersionsClientListOptions contains the optional parameters for the FeaturestoreEntityVersionsClient.NewListPager +// method. +type FeaturestoreEntityVersionsClientListOptions struct { + // createdBy user name + CreatedBy *string + // description for the feature entity version + Description *string + // [ListViewType.ActiveOnly, ListViewType.ArchivedOnly, ListViewType.All]View type for including/excluding (for example) archived + // entities. 
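A similarly hedged sketch of assembling a `FeaturestoreEntityVersion` body from the properties above; a value like this would typically be handed to the `FeaturestoreEntityVersionsClient.BeginCreateOrUpdate` operation whose options are listed in this hunk, and every literal is a placeholder.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// newEntityVersion builds a FeaturestoreEntityVersion request body; only
// writable fields are set, the READ-ONLY ones are populated by the service.
func newEntityVersion() armmachinelearning.FeaturestoreEntityVersion {
	return armmachinelearning.FeaturestoreEntityVersion{
		Properties: &armmachinelearning.FeaturestoreEntityVersionProperties{
			Description: to.Ptr("Customer entity for the fraud featurestore"),
			Stage:       to.Ptr("Production"),
			IsAnonymous: to.Ptr(false),
			Tags:        map[string]*string{"owner": to.Ptr("data-platform")},
		},
	}
}
```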
+ ListViewType *ListViewType + // page size + PageSize *int32 + // Continuation token for pagination. + Skip *string + // Specifies the featurestore stage + Stage *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string + // featurestore entity version + Version *string + // name for the featurestore entity version + VersionName *string +} - // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' - // means freezing layer0 and layer1. For a full list of models supported and details - // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - LayersToFreeze *string +// FeaturizationSettings - Featurization Configuration. +type FeaturizationSettings struct { + // Dataset language, useful for the text data. + DatasetLanguage *string +} - // Initial learning rate. Must be a float in the range [0, 1]. - LearningRate *string +type FileSystemSource struct { + // REQUIRED; [Required] Specifies the type of data. + SourceType *DataImportSourceType - // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. - LearningRateScheduler *string + // Workspace connection for data import source storage + Connection *string - // Name of the model to use for training. For more information on the available models please visit the official documentation: - // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - ModelName *string + // Path on data import FileSystem source + Path *string +} - // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. - Momentum *string +// GetDataImportSource implements the DataImportSourceClassification interface for type FileSystemSource. +func (f *FileSystemSource) GetDataImportSource() *DataImportSource { + return &DataImportSource{ + Connection: f.Connection, + SourceType: f.SourceType, + } +} - // Enable nesterov when optimizer is 'sgd'. - Nesterov *string +// FixedInputData - Fixed input data definition. +type FixedInputData struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + InputDataType *MonitoringInputDataType - // Number of training epochs. Must be a positive integer. - NumberOfEpochs *string + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType - // Number of data loader workers. Must be a non-negative integer. - NumberOfWorkers *string + // REQUIRED; [Required] Input Asset URI. + URI *string - // Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'. - Optimizer *string + // Mapping of column names to special uses. + Columns map[string]*string - // Random seed to be used when using deterministic training. - RandomSeed *string + // The context metadata of the data source. + DataContext *string +} - // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. - StepLRGamma *string +// GetMonitoringInputDataBase implements the MonitoringInputDataBaseClassification interface for type FixedInputData. +func (f *FixedInputData) GetMonitoringInputDataBase() *MonitoringInputDataBase { + return &MonitoringInputDataBase{ + Columns: f.Columns, + DataContext: f.DataContext, + InputDataType: f.InputDataType, + JobInputType: f.JobInputType, + URI: f.URI, + } +} - // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. - StepLRStepSize *string +type FlavorData struct { + // Model flavor-specific data. 
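The monitoring input types are polymorphic, so a short sketch of building a `FixedInputData` value and returning it through its base interface may help; the URI, the column mapping, and the `MonitoringInputDataTypeFixed` enum name are assumptions made for illustration (`JobInputTypeMltable` is the pre-existing job input enum).

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// fixedProductionData builds a FixedInputData value and hands it back as the
// polymorphic MonitoringInputDataBaseClassification interface.
func fixedProductionData() armmachinelearning.MonitoringInputDataBaseClassification {
	return &armmachinelearning.FixedInputData{
		InputDataType: to.Ptr(armmachinelearning.MonitoringInputDataTypeFixed), // assumed enum value
		JobInputType:  to.Ptr(armmachinelearning.JobInputTypeMltable),
		URI:           to.Ptr("azureml://datastores/workspaceblobstore/paths/scoring/2023-06/"),
		DataContext:   to.Ptr("model_inputs"),
		Columns:       map[string]*string{"timestamp": to.Ptr("correlation_id")}, // illustrative column mapping
	}
}
```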
+ Data map[string]*string +} - // Training batch size. Must be a positive integer. - TrainingBatchSize *string +// ForecastHorizonClassification provides polymorphic access to related types. +// Call the interface's GetForecastHorizon() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AutoForecastHorizon, *CustomForecastHorizon, *ForecastHorizon +type ForecastHorizonClassification interface { + // GetForecastHorizon returns the ForecastHorizon content of the underlying type. + GetForecastHorizon() *ForecastHorizon +} - // Validation batch size. Must be a positive integer. - ValidationBatchSize *string +// ForecastHorizon - The desired maximum forecast horizon in units of time-series frequency. +type ForecastHorizon struct { + // REQUIRED; [Required] Set forecast horizon value selection mode. + Mode *ForecastHorizonMode +} - // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. - WarmupCosineLRCycles *string +// GetForecastHorizon implements the ForecastHorizonClassification interface for type ForecastHorizon. +func (f *ForecastHorizon) GetForecastHorizon() *ForecastHorizon { return f } - // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. - WarmupCosineLRWarmupEpochs *string +// Forecasting task in AutoML Table vertical. +type Forecasting struct { + // REQUIRED; [Required] Task type for AutoMLJob. + TaskType *TaskType - // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. - WeightDecay *string -} + // REQUIRED; [Required] Training data input. + TrainingData *MLTableJobInput -// ImageModelDistributionSettingsClassification - Distribution expressions to sweep over values of model settings.Some examples -// are:ModelName = "choice('seresnext', 'resnest50')"; LearningRate = "uniform(0.001, 0.01)"; LayersToFreeze = "choice(0, -// 2)";For more details on how to compose distribution expressions please check the documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters -// For more information -// on the available settings please visit the official documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. -type ImageModelDistributionSettingsClassification struct { - // Enable AMSGrad when optimizer is 'adam' or 'adamw'. - AmsGradient *string + // Columns to use for CVSplit data. + CvSplitColumnNames []*string - // Settings for using Augmentations. - Augmentations *string + // Featurization inputs needed for AutoML job. + FeaturizationSettings *TableVerticalFeaturizationSettings - // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta1 *string + // Model/training parameters that will remain constant throughout training. + FixedParameters *TableFixedParameters - // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta2 *string + // Forecasting task specific inputs. + ForecastingSettings *ForecastingSettings - // Whether to use distributer training. - Distributed *string + // Execution constraints for AutoMLJob. + LimitSettings *TableVerticalLimitSettings - // Enable early stopping logic during training. - EarlyStopping *string + // Log verbosity for the job. 
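`ForecastHorizonClassification` follows the same polymorphic pattern as the other `*Classification` interfaces in this hunk; a small sketch of the type switch its comment recommends (function and variable names are illustrative).

```go
package sample

import "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"

// describeHorizon inspects a polymorphic forecast horizon: first through the
// common base type, then by switching on the concrete type.
func describeHorizon(h armmachinelearning.ForecastHorizonClassification) string {
	mode := ""
	if base := h.GetForecastHorizon(); base != nil && base.Mode != nil {
		mode = string(*base.Mode)
	}
	switch h.(type) {
	case *armmachinelearning.AutoForecastHorizon:
		return "horizon chosen automatically, mode=" + mode
	case *armmachinelearning.CustomForecastHorizon:
		return "user-defined horizon, mode=" + mode
	default:
		return "unknown horizon type"
	}
}
```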
+ LogVerbosity *LogVerbosity - // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. - // Must be a positive integer. - EarlyStoppingDelay *string + // Number of cross validation folds to be applied on training dataset when validation dataset is not provided. + NCrossValidations NCrossValidationsClassification - // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be - // a positive integer. - EarlyStoppingPatience *string + // Primary metric for forecasting task. + PrimaryMetric *ForecastingPrimaryMetrics - // Enable normalization when exporting ONNX model. - EnableOnnxNormalization *string + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*TableParameterSubspace - // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. - EvaluationFrequency *string + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *TableSweepSettings - // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights - // while accumulating the gradients of those steps, and then using the - // accumulated gradients to compute the weight updates. Must be a positive integer. - GradientAccumulationStep *string + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. + TargetColumnName *string - // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' - // means freezing layer0 and layer1. For a full list of models supported and details - // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - LayersToFreeze *string + // Test data input. + TestData *MLTableJobInput - // Initial learning rate. Must be a float in the range [0, 1]. - LearningRate *string + // The fraction of test dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied when + // validation dataset is not provided. + TestDataSize *float64 - // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. - LearningRateScheduler *string + // Inputs for training phase for an AutoML Job. + TrainingSettings *ForecastingTrainingSettings - // Name of the model to use for training. For more information on the available models please visit the official documentation: - // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - ModelName *string + // Validation data inputs. + ValidationData *MLTableJobInput - // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. - Momentum *string + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 - // Enable nesterov when optimizer is 'sgd'. - Nesterov *string + // The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to + // be weighted up or down. + WeightColumnName *string +} - // Number of training epochs. Must be a positive integer. - NumberOfEpochs *string +// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type Forecasting. 
+func (f *Forecasting) GetAutoMLVertical() *AutoMLVertical { + return &AutoMLVertical{ + LogVerbosity: f.LogVerbosity, + TargetColumnName: f.TargetColumnName, + TaskType: f.TaskType, + TrainingData: f.TrainingData, + } +} - // Number of data loader workers. Must be a non-negative integer. - NumberOfWorkers *string +// ForecastingSettings - Forecasting specific parameters. +type ForecastingSettings struct { + // Country or region for holidays for forecasting tasks. These should be ISO 3166 two-letter country/region codes, for example + // 'US' or 'GB'. + CountryOrRegionForHolidays *string - // Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'. - Optimizer *string + // Number of periods between the origin time of one CV fold and the next fold. For example, if CVStepSize = 3 for daily data, + // the origin time for each fold will be three days apart. + CvStepSize *int32 - // Random seed to be used when using deterministic training. - RandomSeed *string + // Flag for generating lags for the numeric features with 'auto' or null. + FeatureLags *FeatureLags - // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. - StepLRGamma *string + // The feature columns that are available for training but unknown at the time of forecast/inference. If featuresunknownatforecasttime + // is not set, it is assumed that all the feature columns in the + // dataset are known at inference time. + FeaturesUnknownAtForecastTime []*string - // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. - StepLRStepSize *string + // The desired maximum forecast horizon in units of time-series frequency. + ForecastHorizon ForecastHorizonClassification - // Training batch size. Must be a positive integer. - TrainingBatchSize *string + // When forecasting, this parameter represents the period with which the forecast is desired, for example daily, weekly, yearly, + // etc. The forecast frequency is dataset frequency by default. + Frequency *string - // Image crop size that is input to the neural network for the training dataset. Must be a positive integer. - TrainingCropSize *string + // Set time series seasonality as an integer multiple of the series frequency. If seasonality is set to 'auto', it will be + // inferred. + Seasonality SeasonalityClassification - // Validation batch size. Must be a positive integer. - ValidationBatchSize *string + // The parameter defining how if AutoML should handle short time series. + ShortSeriesHandlingConfig *ShortSeriesHandlingConfiguration - // Image crop size that is input to the neural network for the validation dataset. Must be a positive integer. - ValidationCropSize *string + // The function to be used to aggregate the time series target column to conform to a user specified frequency. If the TargetAggregateFunction + // is set i.e. not 'None', but the freq parameter is not set, + // the error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean". + TargetAggregateFunction *TargetAggregationFunction - // Image size to which to resize before cropping for validation dataset. Must be a positive integer. - ValidationResizeSize *string + // The number of past periods to lag from the target column. + TargetLags TargetLagsClassification - // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. - WarmupCosineLRCycles *string + // The number of past periods used to create a rolling window average of the target column. 
+ TargetRollingWindowSize TargetRollingWindowSizeClassification - // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. - WarmupCosineLRWarmupEpochs *string + // The name of the time column. This parameter is required when forecasting to specify the datetime column in the input data + // used for building the time series and inferring its frequency. + TimeColumnName *string - // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. - WeightDecay *string + // The names of columns used to group a timeseries. It can be used to create multiple series. If grain is not defined, the + // data set is assumed to be one time-series. This parameter is used with task type + // forecasting. + TimeSeriesIDColumnNames []*string - // Weighted loss. The accepted values are 0 for no weighted loss. 1 for weighted loss with sqrt.(classweights). 2 for weighted - // loss with classweights. Must be 0 or 1 or 2. - WeightedLoss *string + // Configure STL Decomposition of the time-series target column. + UseStl *UseStl } -// ImageModelDistributionSettingsObjectDetection - Distribution expressions to sweep over values of model settings.Some examples -// are:ModelName = "choice('seresnext', 'resnest50')"; LearningRate = "uniform(0.001, 0.01)"; LayersToFreeze = "choice(0, -// 2)";For more details on how to compose distribution expressions please check the documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters -// For more information -// on the available settings please visit the official documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. -type ImageModelDistributionSettingsObjectDetection struct { - // Enable AMSGrad when optimizer is 'adam' or 'adamw'. - AmsGradient *string - - // Settings for using Augmentations. - Augmentations *string - - // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta1 *string - - // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta2 *string - - // Maximum number of detections per image, for all classes. Must be a positive integer. Note: This settings is not supported - // for the 'yolov5' algorithm. - BoxDetectionsPerImage *string - - // During inference, only return proposals with a classification score greater than BoxScoreThreshold. Must be a float in - // the range[0, 1]. - BoxScoreThreshold *string - - // Whether to use distributer training. - Distributed *string +// ForecastingTrainingSettings - Forecasting Training related configuration. +type ForecastingTrainingSettings struct { + // Allowed models for forecasting task. + AllowedTrainingAlgorithms []*ForecastingModels - // Enable early stopping logic during training. - EarlyStopping *string + // Blocked models for forecasting task. + BlockedTrainingAlgorithms []*ForecastingModels - // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. - // Must be a positive integer. - EarlyStoppingDelay *string + // Enable recommendation of DNN models. + EnableDnnTraining *bool - // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be - // a positive integer. - EarlyStoppingPatience *string + // Flag to turn on explainability on best model. 
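Pulling a few of these fields together, a hypothetical `ForecastingSettings` value for a daily series; the column names, frequency string, and the `ForecastHorizonModeAuto` enum name are assumptions made for illustration.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// dailyForecastingSettings shows one plausible way to fill ForecastingSettings
// for a daily series; all literal values are placeholders.
func dailyForecastingSettings() *armmachinelearning.ForecastingSettings {
	return &armmachinelearning.ForecastingSettings{
		TimeColumnName:             to.Ptr("order_date"),
		TimeSeriesIDColumnNames:    []*string{to.Ptr("store_id")},
		Frequency:                  to.Ptr("D"), // daily
		CvStepSize:                 to.Ptr[int32](3),
		CountryOrRegionForHolidays: to.Ptr("US"),
		// Let the service pick the horizon; ForecastHorizonModeAuto is assumed
		// to be the enum value matching AutoForecastHorizon.
		ForecastHorizon: &armmachinelearning.AutoForecastHorizon{
			Mode: to.Ptr(armmachinelearning.ForecastHorizonModeAuto),
		},
	}
}
```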
+ EnableModelExplainability *bool - // Enable normalization when exporting ONNX model. - EnableOnnxNormalization *string + // Flag for enabling onnx compatible models. + EnableOnnxCompatibleModels *bool - // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. - EvaluationFrequency *string + // Enable stack ensemble run. + EnableStackEnsemble *bool - // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights - // while accumulating the gradients of those steps, and then using the - // accumulated gradients to compute the weight updates. Must be a positive integer. - GradientAccumulationStep *string + // Enable voting ensemble run. + EnableVoteEnsemble *bool - // Image size for train and validation. Must be a positive integer. Note: The training run may get into CUDA OOM if the size - // is too big. Note: This settings is only supported for the 'yolov5' algorithm. - ImageSize *string + // During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. + // Configure this parameter with a higher value than 300 secs, if more time + // is needed. + EnsembleModelDownloadTimeout *string - // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' - // means freezing layer0 and layer1. For a full list of models supported and details - // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - LayersToFreeze *string + // Stack ensemble settings for stack ensemble run. + StackEnsembleSettings *StackEnsembleSettings - // Initial learning rate. Must be a float in the range [0, 1]. - LearningRate *string + // TrainingMode mode - Setting to 'auto' is same as setting it to 'non-distributed' for now, however in the future may result + // in mixed mode or heuristics based mode selection. Default is 'auto'. If + // 'Distributed' then only distributed featurization is used and distributed algorithms are chosen. If 'NonDistributed' then + // only non distributed algorithms are chosen. + TrainingMode *TrainingMode +} - // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. - LearningRateScheduler *string +// FqdnOutboundRule - FQDN Outbound Rule for the managed network of a machine learning workspace. +type FqdnOutboundRule struct { + // REQUIRED; Type of a managed network Outbound Rule of a machine learning workspace. + Type *RuleType - // Maximum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training - // run may get into CUDA OOM if the size is too big. Note: This settings is not - // supported for the 'yolov5' algorithm. - MaxSize *string + // Category of a managed network Outbound Rule of a machine learning workspace. + Category *RuleCategory + Destination *string - // Minimum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training - // run may get into CUDA OOM if the size is too big. Note: This settings is not - // supported for the 'yolov5' algorithm. - MinSize *string + // Status of a managed network Outbound Rule of a machine learning workspace. + Status *RuleStatus +} - // Name of the model to use for training. For more information on the available models please visit the official documentation: - // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- ModelName *string +// GetOutboundRule implements the OutboundRuleClassification interface for type FqdnOutboundRule. +func (f *FqdnOutboundRule) GetOutboundRule() *OutboundRule { + return &OutboundRule{ + Category: f.Category, + Status: f.Status, + Type: f.Type, + } +} - // Model size. Must be 'small', 'medium', 'large', or 'xlarge'. Note: training run may get into CUDA OOM if the model size - // is too big. Note: This settings is only supported for the 'yolov5' algorithm. - ModelSize *string +// GenerationSafetyQualityMetricThreshold - Generation safety quality metric threshold definition. +type GenerationSafetyQualityMetricThreshold struct { + // REQUIRED; [Required] Gets or sets the feature attribution metric to calculate. + Metric *GenerationSafetyQualityMetric - // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. - Momentum *string + // Gets or sets the threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} - // Enable multi-scale image by varying image size by +/- 50%. Note: training run may get into CUDA OOM if no sufficient GPU - // memory. Note: This settings is only supported for the 'yolov5' algorithm. - MultiScale *string +// GenerationSafetyQualityMonitoringSignal - Generation safety quality monitoring signal definition. +type GenerationSafetyQualityMonitoringSignal struct { + // REQUIRED; [Required] Gets or sets the metrics to calculate and the corresponding thresholds. + MetricThresholds []*GenerationSafetyQualityMetricThreshold - // Enable nesterov when optimizer is 'sgd'. - Nesterov *string + // REQUIRED; [Required] The sample rate of the target data, should be greater than 0 and at most 1. + SamplingRate *float64 - // IOU threshold used during inference in NMS post processing. Must be float in the range [0, 1]. - NmsIouThreshold *string + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType - // Number of training epochs. Must be a positive integer. - NumberOfEpochs *string + // The current notification mode for this signal. + Mode *MonitoringNotificationMode - // Number of data loader workers. Must be a non-negative integer. - NumberOfWorkers *string + // Gets or sets the target data for computing metrics. + ProductionData []MonitoringInputDataBaseClassification - // Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'. - Optimizer *string + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string - // Random seed to be used when using deterministic training. - RandomSeed *string + // Gets or sets the workspace connection ID used to connect to the content generation endpoint. + WorkspaceConnectionID *string +} - // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. - StepLRGamma *string +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type GenerationSafetyQualityMonitoringSignal. +func (g *GenerationSafetyQualityMonitoringSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: g.Mode, + Properties: g.Properties, + SignalType: g.SignalType, + } +} - // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. - StepLRStepSize *string +// GenerationTokenStatisticsMetricThreshold - Generation token statistics metric threshold definition. 
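A hedged sketch of constructing the new `FqdnOutboundRule` and returning it through the `OutboundRule` base; the `RuleTypeFQDN` and `RuleCategoryUserDefined` enum names and the destination are assumptions, not taken from the generated code.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// pypiOutboundRule builds an FQDN outbound rule for a managed network and
// returns it through the polymorphic OutboundRule base.
func pypiOutboundRule() armmachinelearning.OutboundRuleClassification {
	return &armmachinelearning.FqdnOutboundRule{
		Type:        to.Ptr(armmachinelearning.RuleTypeFQDN),            // assumed enum value
		Category:    to.Ptr(armmachinelearning.RuleCategoryUserDefined), // assumed enum value
		Destination: to.Ptr("pypi.org"),
	}
}
```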
+type GenerationTokenStatisticsMetricThreshold struct { + // REQUIRED; [Required] Gets or sets the feature attribution metric to calculate. + Metric *GenerationTokenStatisticsMetric - // The grid size to use for tiling each image. Note: TileGridSize must not be None to enable small object detection logic. - // A string containing two integers in mxn format. Note: This settings is not - // supported for the 'yolov5' algorithm. - TileGridSize *string + // Gets or sets the threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} - // Overlap ratio between adjacent tiles in each dimension. Must be float in the range [0, 1). Note: This settings is not supported - // for the 'yolov5' algorithm. - TileOverlapRatio *string +// GenerationTokenStatisticsSignal - Generation token statistics signal definition. +type GenerationTokenStatisticsSignal struct { + // REQUIRED; [Required] Gets or sets the metrics to calculate and the corresponding thresholds. + MetricThresholds []*GenerationTokenStatisticsMetricThreshold - // The IOU threshold to use to perform NMS while merging predictions from tiles and image. Used in validation/ inference. - // Must be float in the range [0, 1]. Note: This settings is not supported for the - // 'yolov5' algorithm. NMS: Non-maximum suppression - TilePredictionsNmsThreshold *string + // REQUIRED; [Required] The sample rate of the target data, should be greater than 0 and at most 1. + SamplingRate *float64 - // Training batch size. Must be a positive integer. - TrainingBatchSize *string + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType - // Validation batch size. Must be a positive integer. - ValidationBatchSize *string + // The current notification mode for this signal. + Mode *MonitoringNotificationMode - // IOU threshold to use when computing validation metric. Must be float in the range [0, 1]. - ValidationIouThreshold *string + // Gets or sets the target data for computing metrics. + ProductionData MonitoringInputDataBaseClassification - // Metric computation method to use for validation metrics. Must be 'none', 'coco', 'voc', or 'coco_voc'. - ValidationMetricType *string + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string +} - // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. - WarmupCosineLRCycles *string +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type GenerationTokenStatisticsSignal. +func (g *GenerationTokenStatisticsSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: g.Mode, + Properties: g.Properties, + SignalType: g.SignalType, + } +} - // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. - WarmupCosineLRWarmupEpochs *string +// GridSamplingAlgorithm - Defines a Sampling Algorithm that exhaustively generates every value combination in the space +type GridSamplingAlgorithm struct { + // REQUIRED; [Required] The algorithm used for generating hyperparameter values, along with configuration properties + SamplingAlgorithmType *SamplingAlgorithmType +} - // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. - WeightDecay *string +// GetSamplingAlgorithm implements the SamplingAlgorithmClassification interface for type GridSamplingAlgorithm. 
+func (g *GridSamplingAlgorithm) GetSamplingAlgorithm() *SamplingAlgorithm { + return &SamplingAlgorithm{ + SamplingAlgorithmType: g.SamplingAlgorithmType, + } } -// ImageModelSettings - Settings used for training the model. For more information on the available settings please visit -// the official documentation: -// https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. -type ImageModelSettings struct { - // Settings for advanced scenarios. - AdvancedSettings *string +// HDInsight - A HDInsight compute. +type HDInsight struct { + // REQUIRED; The type of compute + ComputeType *ComputeType - // Enable AMSGrad when optimizer is 'adam' or 'adamw'. - AmsGradient *bool + // Location for the underlying compute + ComputeLocation *string - // Settings for using Augmentations. - Augmentations *string + // The description of the Machine Learning compute. + Description *string - // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta1 *float32 + // Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication. + DisableLocalAuth *bool - // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta2 *float32 + // HDInsight compute properties + Properties *HDInsightProperties - // Frequency to store model checkpoints. Must be a positive integer. - CheckpointFrequency *int32 + // ARM resource id of the underlying compute + ResourceID *string - // The pretrained checkpoint model for incremental training. - CheckpointModel *MLFlowModelJobInput + // READ-ONLY; The time at which the compute was created. + CreatedOn *time.Time - // The id of a previous run that has a pretrained checkpoint for incremental training. - CheckpointRunID *string + // READ-ONLY; Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning + // service provisioned it if false. + IsAttachedCompute *bool - // Whether to use distributed training. - Distributed *bool + // READ-ONLY; The time at which the compute was last modified. + ModifiedOn *time.Time - // Enable early stopping logic during training. - EarlyStopping *bool + // READ-ONLY; Errors during provisioning + ProvisioningErrors []*ErrorResponse - // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. - // Must be a positive integer. - EarlyStoppingDelay *int32 - - // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be - // a positive integer. - EarlyStoppingPatience *int32 + // READ-ONLY; The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed. + ProvisioningState *ProvisioningState +} - // Enable normalization when exporting ONNX model. - EnableOnnxNormalization *bool +// GetCompute implements the ComputeClassification interface for type HDInsight. +func (h *HDInsight) GetCompute() *Compute { + return &Compute{ + ComputeType: h.ComputeType, + ComputeLocation: h.ComputeLocation, + ProvisioningState: h.ProvisioningState, + Description: h.Description, + CreatedOn: h.CreatedOn, + ModifiedOn: h.ModifiedOn, + ResourceID: h.ResourceID, + ProvisioningErrors: h.ProvisioningErrors, + IsAttachedCompute: h.IsAttachedCompute, + DisableLocalAuth: h.DisableLocalAuth, + } +} - // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. 
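`GridSamplingAlgorithm` is typically supplied through the polymorphic `SamplingAlgorithm` base, for example inside a sweep job definition; a minimal sketch, assuming the existing `SamplingAlgorithmTypeGrid` enum value.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// gridSampling returns a grid sampling algorithm through the polymorphic
// SamplingAlgorithm base; grid sampling exhaustively enumerates the space.
func gridSampling() armmachinelearning.SamplingAlgorithmClassification {
	return &armmachinelearning.GridSamplingAlgorithm{
		SamplingAlgorithmType: to.Ptr(armmachinelearning.SamplingAlgorithmTypeGrid),
	}
}
```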
- EvaluationFrequency *int32 +// HDInsightProperties - HDInsight compute properties +type HDInsightProperties struct { + // Public IP address of the master node of the cluster. + Address *string - // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights - // while accumulating the gradients of those steps, and then using the - // accumulated gradients to compute the weight updates. Must be a positive integer. - GradientAccumulationStep *int32 + // Admin credentials for master node of the cluster + AdministratorAccount *VirtualMachineSSHCredentials - // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' - // means freezing layer0 and layer1. For a full list of models supported and details - // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - LayersToFreeze *int32 + // Port open for ssh connections on the master node of the cluster. + SSHPort *int32 +} - // Initial learning rate. Must be a float in the range [0, 1]. - LearningRate *float32 +type HDInsightSchema struct { + // HDInsight compute properties + Properties *HDInsightProperties +} - // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. - LearningRateScheduler *LearningRateScheduler +type HdfsDatastore struct { + // REQUIRED; [Required] Account credentials. + Credentials DatastoreCredentialsClassification - // Name of the model to use for training. For more information on the available models please visit the official documentation: - // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - ModelName *string + // REQUIRED; [Required] Storage type backing the datastore. + DatastoreType *DatastoreType - // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. - Momentum *float32 + // REQUIRED; [Required] IP Address or DNS HostName. + NameNodeAddress *string - // Enable nesterov when optimizer is 'sgd'. - Nesterov *bool + // The asset description text. + Description *string - // Number of training epochs. Must be a positive integer. - NumberOfEpochs *int32 + // The TLS cert of the HDFS server. Needs to be a base64 encoded string. Required if "Https" protocol is selected. + HdfsServerCertificate *string - // Number of data loader workers. Must be a non-negative integer. - NumberOfWorkers *int32 + // Intellectual Property details. + IntellectualProperty *IntellectualProperty - // Type of optimizer. - Optimizer *StochasticOptimizer + // The asset property dictionary. + Properties map[string]*string - // Random seed to be used when using deterministic training. - RandomSeed *int32 + // Protocol used to communicate with the storage account (Https/Http). + Protocol *string - // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. - StepLRGamma *float32 + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string - // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. - StepLRStepSize *int32 + // READ-ONLY; Readonly property to indicate if datastore is the workspace default datastore + IsDefault *bool +} - // Training batch size. Must be a positive integer. - TrainingBatchSize *int32 +// GetDatastoreProperties implements the DatastorePropertiesClassification interface for type HdfsDatastore. 
+func (h *HdfsDatastore) GetDatastoreProperties() *DatastoreProperties { + return &DatastoreProperties{ + Credentials: h.Credentials, + DatastoreType: h.DatastoreType, + IntellectualProperty: h.IntellectualProperty, + IsDefault: h.IsDefault, + Description: h.Description, + Properties: h.Properties, + Tags: h.Tags, + } +} - // Validation batch size. Must be a positive integer. - ValidationBatchSize *int32 +// IDAssetReference - Reference to an asset via its ARM resource ID. +type IDAssetReference struct { + // REQUIRED; [Required] ARM resource ID of the asset. + AssetID *string - // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. - WarmupCosineLRCycles *float32 + // REQUIRED; [Required] Specifies the type of asset reference. + ReferenceType *ReferenceType +} - // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. - WarmupCosineLRWarmupEpochs *int32 +// GetAssetReferenceBase implements the AssetReferenceBaseClassification interface for type IDAssetReference. +func (i *IDAssetReference) GetAssetReferenceBase() *AssetReferenceBase { + return &AssetReferenceBase{ + ReferenceType: i.ReferenceType, + } +} - // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. - WeightDecay *float32 +// IdentityConfigurationClassification provides polymorphic access to related types. +// Call the interface's GetIdentityConfiguration() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AmlToken, *IdentityConfiguration, *ManagedIdentity, *UserIdentity +type IdentityConfigurationClassification interface { + // GetIdentityConfiguration returns the IdentityConfiguration content of the underlying type. + GetIdentityConfiguration() *IdentityConfiguration } -// ImageModelSettingsClassification - Settings used for training the model. For more information on the available settings -// please visit the official documentation: -// https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. -type ImageModelSettingsClassification struct { - // Settings for advanced scenarios. - AdvancedSettings *string +// IdentityConfiguration - Base definition for identity configuration. +type IdentityConfiguration struct { + // REQUIRED; [Required] Specifies the type of identity framework. + IdentityType *IdentityConfigurationType +} - // Enable AMSGrad when optimizer is 'adam' or 'adamw'. - AmsGradient *bool +// GetIdentityConfiguration implements the IdentityConfigurationClassification interface for type IdentityConfiguration. +func (i *IdentityConfiguration) GetIdentityConfiguration() *IdentityConfiguration { return i } - // Settings for using Augmentations. - Augmentations *string +// IdentityForCmk - Identity object used for encryption. +type IdentityForCmk struct { + // UserAssignedIdentity to be used to fetch the encryption key from keyVault + UserAssignedIdentity *string +} - // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta1 *float32 +// IdleShutdownSetting - Stops compute instance after user defined period of inactivity. +type IdleShutdownSetting struct { + // Time is defined in ISO8601 format. Minimum is 15 min, maximum is 3 days. + IdleTimeBeforeShutdown *string +} - // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. 
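Two small, self-contained sketches for the asset reference and idle shutdown types above; the ARM resource ID is a placeholder and `ReferenceTypeID` is assumed to be the matching enum value.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// assetByID references a registered asset by ARM resource ID and returns it
// through the polymorphic AssetReferenceBase interface.
func assetByID() armmachinelearning.AssetReferenceBaseClassification {
	return &armmachinelearning.IDAssetReference{
		ReferenceType: to.Ptr(armmachinelearning.ReferenceTypeID), // assumed enum value
		AssetID:       to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.MachineLearningServices/workspaces/my-ws/models/my-model/versions/1"),
	}
}

// shutDownAfter30Min configures compute-instance idle shutdown with an ISO 8601
// duration, as described by IdleShutdownSetting above.
func shutDownAfter30Min() *armmachinelearning.IdleShutdownSetting {
	return &armmachinelearning.IdleShutdownSetting{
		IdleTimeBeforeShutdown: to.Ptr("PT30M"),
	}
}
```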
- Beta2 *float32 +type Image struct { + // OPTIONAL; Contains additional key/value pairs not defined in the schema. + AdditionalProperties map[string]any - // Frequency to store model checkpoints. Must be a positive integer. - CheckpointFrequency *int32 + // Image reference URL + Reference *string - // The pretrained checkpoint model for incremental training. - CheckpointModel *MLFlowModelJobInput + // Type of the image. Possible values are: docker - For docker images. azureml - For AzureML images + Type *ImageType +} - // The id of a previous run that has a pretrained checkpoint for incremental training. - CheckpointRunID *string +// ImageClassification - Image Classification. Multi-class image classification is used when an image is classified with only +// a single label from a set of classes - e.g. each image is classified as either an image of a 'cat' +// or a 'dog' or a 'duck'. +type ImageClassification struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings - // Whether to use distributed training. - Distributed *bool + // REQUIRED; [Required] Task type for AutoMLJob. + TaskType *TaskType - // Enable early stopping logic during training. - EarlyStopping *bool + // REQUIRED; [Required] Training data input. + TrainingData *MLTableJobInput - // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. - // Must be a positive integer. - EarlyStoppingDelay *int32 + // Log verbosity for the job. + LogVerbosity *LogVerbosity - // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be - // a positive integer. - EarlyStoppingPatience *int32 + // Settings used for training the model. + ModelSettings *ImageModelSettingsClassification - // Enable normalization when exporting ONNX model. - EnableOnnxNormalization *bool + // Primary metric to optimize for this task. + PrimaryMetric *ClassificationPrimaryMetrics - // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. - EvaluationFrequency *int32 + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*ImageModelDistributionSettingsClassification - // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights - // while accumulating the gradients of those steps, and then using the - // accumulated gradients to compute the weight updates. Must be a positive integer. - GradientAccumulationStep *int32 + // Model sweeping and hyperparameter sweeping related settings. + SweepSettings *ImageSweepSettings - // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' - // means freezing layer0 and layer1. For a full list of models supported and details - // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - LayersToFreeze *int32 + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. + TargetColumnName *string - // Initial learning rate. Must be a float in the range [0, 1]. - LearningRate *float32 + // Validation data inputs. + ValidationData *MLTableJobInput - // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. 
- LearningRateScheduler *LearningRateScheduler + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 +} - // Name of the model to use for training. For more information on the available models please visit the official documentation: - // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - ModelName *string +// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageClassification. +func (i *ImageClassification) GetAutoMLVertical() *AutoMLVertical { + return &AutoMLVertical{ + LogVerbosity: i.LogVerbosity, + TargetColumnName: i.TargetColumnName, + TaskType: i.TaskType, + TrainingData: i.TrainingData, + } +} - // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. - Momentum *float32 +type ImageClassificationBase struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings - // Enable nesterov when optimizer is 'sgd'. - Nesterov *bool + // Settings used for training the model. + ModelSettings *ImageModelSettingsClassification - // Number of training epochs. Must be a positive integer. - NumberOfEpochs *int32 + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*ImageModelDistributionSettingsClassification - // Number of data loader workers. Must be a non-negative integer. - NumberOfWorkers *int32 + // Model sweeping and hyperparameter sweeping related settings. + SweepSettings *ImageSweepSettings - // Type of optimizer. - Optimizer *StochasticOptimizer + // Validation data inputs. + ValidationData *MLTableJobInput - // Random seed to be used when using deterministic training. - RandomSeed *int32 + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 +} - // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. - StepLRGamma *float32 +// ImageClassificationMultilabel - Image Classification Multilabel. Multi-label image classification is used when an image +// could have one or more labels from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'. +type ImageClassificationMultilabel struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings - // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. - StepLRStepSize *int32 + // REQUIRED; [Required] Task type for AutoMLJob. + TaskType *TaskType - // Training batch size. Must be a positive integer. - TrainingBatchSize *int32 + // REQUIRED; [Required] Training data input. + TrainingData *MLTableJobInput - // Image crop size that is input to the neural network for the training dataset. Must be a positive integer. - TrainingCropSize *int32 + // Log verbosity for the job. + LogVerbosity *LogVerbosity - // Validation batch size. Must be a positive integer. - ValidationBatchSize *int32 + // Settings used for training the model. + ModelSettings *ImageModelSettingsClassification - // Image crop size that is input to the neural network for the validation dataset. Must be a positive integer. - ValidationCropSize *int32 + // Primary metric to optimize for this task. 
+ PrimaryMetric *ClassificationMultilabelPrimaryMetrics - // Image size to which to resize before cropping for validation dataset. Must be a positive integer. - ValidationResizeSize *int32 + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*ImageModelDistributionSettingsClassification - // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. - WarmupCosineLRCycles *float32 + // Model sweeping and hyperparameter sweeping related settings. + SweepSettings *ImageSweepSettings - // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. - WarmupCosineLRWarmupEpochs *int32 + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. + TargetColumnName *string - // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. - WeightDecay *float32 + // Validation data inputs. + ValidationData *MLTableJobInput - // Weighted loss. The accepted values are 0 for no weighted loss. 1 for weighted loss with sqrt.(classweights). 2 for weighted - // loss with classweights. Must be 0 or 1 or 2. - WeightedLoss *int32 + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 } -// ImageModelSettingsObjectDetection - Settings used for training the model. For more information on the available settings -// please visit the official documentation: -// https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. -type ImageModelSettingsObjectDetection struct { - // Settings for advanced scenarios. - AdvancedSettings *string +// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageClassificationMultilabel. +func (i *ImageClassificationMultilabel) GetAutoMLVertical() *AutoMLVertical { + return &AutoMLVertical{ + LogVerbosity: i.LogVerbosity, + TargetColumnName: i.TargetColumnName, + TaskType: i.TaskType, + TrainingData: i.TrainingData, + } +} - // Enable AMSGrad when optimizer is 'adam' or 'adamw'. - AmsGradient *bool +// ImageInstanceSegmentation - Image Instance Segmentation. Instance segmentation is used to identify objects in an image +// at the pixel level, drawing a polygon around each object in the image. +type ImageInstanceSegmentation struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings - // Settings for using Augmentations. - Augmentations *string + // REQUIRED; [Required] Task type for AutoMLJob. + TaskType *TaskType - // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta1 *float32 + // REQUIRED; [Required] Training data input. + TrainingData *MLTableJobInput - // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. - Beta2 *float32 + // Log verbosity for the job. + LogVerbosity *LogVerbosity - // Maximum number of detections per image, for all classes. Must be a positive integer. Note: This settings is not supported - // for the 'yolov5' algorithm. - BoxDetectionsPerImage *int32 + // Settings used for training the model. + ModelSettings *ImageModelSettingsObjectDetection - // During inference, only return proposals with a classification score greater than BoxScoreThreshold. 
Must be a float in - // the range[0, 1]. - BoxScoreThreshold *float32 + // Primary metric to optimize for this task. + PrimaryMetric *InstanceSegmentationPrimaryMetrics - // Frequency to store model checkpoints. Must be a positive integer. - CheckpointFrequency *int32 + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*ImageModelDistributionSettingsObjectDetection - // The pretrained checkpoint model for incremental training. - CheckpointModel *MLFlowModelJobInput + // Model sweeping and hyperparameter sweeping related settings. + SweepSettings *ImageSweepSettings - // The id of a previous run that has a pretrained checkpoint for incremental training. - CheckpointRunID *string + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. + TargetColumnName *string - // Whether to use distributed training. - Distributed *bool + // Validation data inputs. + ValidationData *MLTableJobInput + + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 +} + +// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageInstanceSegmentation. +func (i *ImageInstanceSegmentation) GetAutoMLVertical() *AutoMLVertical { + return &AutoMLVertical{ + LogVerbosity: i.LogVerbosity, + TargetColumnName: i.TargetColumnName, + TaskType: i.TaskType, + TrainingData: i.TrainingData, + } +} + +// ImageLimitSettings - Limit settings for the AutoML job. +type ImageLimitSettings struct { + // Maximum number of concurrent AutoML iterations. + MaxConcurrentTrials *int32 + + // Maximum number of AutoML iterations. + MaxTrials *int32 + + // AutoML job timeout. + Timeout *string +} + +// ImageMetadata - Returns metadata about the operating system image for this compute instance. +type ImageMetadata struct { + // Specifies the current operating system image version this compute instance is running on. + CurrentImageVersion *string + + // Specifies whether this compute instance is running on the latest operating system image. + IsLatestOsImageVersion *bool + + // Specifies the latest available operating system image version. + LatestImageVersion *string +} + +// ImageModelDistributionSettings - Distribution expressions to sweep over values of model settings.Some examples are: +// ModelName = "choice('seresnext', 'resnest50')"; +// LearningRate = "uniform(0.001, 0.01)"; +// LayersToFreeze = "choice(0, 2)"; +// All distributions can be specified as distribution_name(min, max) or choice(val1, val2, …, valn) where distribution name +// can be: uniform, quniform, loguniform, etc For more details on how to compose +// distribution expressions please check the documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters +// For more information on the available settings please visit +// the official documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. +type ImageModelDistributionSettings struct { + // Enable AMSGrad when optimizer is 'adam' or 'adamw'. + AmsGradient *string + + // Settings for using Augmentations. + Augmentations *string + + // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta1 *string + + // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. 
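To show how the image verticals compose, a hypothetical `ImageClassification` task wired up with the `ImageLimitSettings` defined above; the MLTable URI, the limits, and the ISO 8601 timeout string are placeholders, and the enum values used are the pre-existing ones.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// imageClassificationVertical sketches an ImageClassification AutoML vertical
// with required limit settings and training data.
func imageClassificationVertical() armmachinelearning.AutoMLVerticalClassification {
	return &armmachinelearning.ImageClassification{
		TaskType: to.Ptr(armmachinelearning.TaskTypeImageClassification),
		TrainingData: &armmachinelearning.MLTableJobInput{
			JobInputType: to.Ptr(armmachinelearning.JobInputTypeMltable),
			URI:          to.Ptr("azureml://datastores/workspaceblobstore/paths/training-mltable/"),
		},
		LimitSettings: &armmachinelearning.ImageLimitSettings{
			MaxTrials:           to.Ptr[int32](20),
			MaxConcurrentTrials: to.Ptr[int32](4),
			Timeout:             to.Ptr("PT2H"), // ISO 8601 duration assumed for the timeout string
		},
		PrimaryMetric: to.Ptr(armmachinelearning.ClassificationPrimaryMetricsAccuracy),
	}
}
```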
+ Beta2 *string + + // Whether to use distributer training. + Distributed *string // Enable early stopping logic during training. - EarlyStopping *bool + EarlyStopping *string // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. // Must be a positive integer. - EarlyStoppingDelay *int32 + EarlyStoppingDelay *string // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be // a positive integer. - EarlyStoppingPatience *int32 + EarlyStoppingPatience *string // Enable normalization when exporting ONNX model. - EnableOnnxNormalization *bool + EnableOnnxNormalization *string // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. - EvaluationFrequency *int32 + EvaluationFrequency *string // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights // while accumulating the gradients of those steps, and then using the // accumulated gradients to compute the weight updates. Must be a positive integer. - GradientAccumulationStep *int32 - - // Image size for train and validation. Must be a positive integer. Note: The training run may get into CUDA OOM if the size - // is too big. Note: This settings is only supported for the 'yolov5' algorithm. - ImageSize *int32 + GradientAccumulationStep *string // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' // means freezing layer0 and layer1. For a full list of models supported and details // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. - LayersToFreeze *int32 + LayersToFreeze *string // Initial learning rate. Must be a float in the range [0, 1]. - LearningRate *float32 + LearningRate *string // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. - LearningRateScheduler *LearningRateScheduler - - // Maximum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training - // run may get into CUDA OOM if the size is too big. Note: This settings is not - // supported for the 'yolov5' algorithm. - MaxSize *int32 - - // Minimum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training - // run may get into CUDA OOM if the size is too big. Note: This settings is not - // supported for the 'yolov5' algorithm. - MinSize *int32 + LearningRateScheduler *string // Name of the model to use for training. For more information on the available models please visit the official documentation: // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. ModelName *string - // Model size. Must be 'small', 'medium', 'large', or 'xlarge'. Note: training run may get into CUDA OOM if the model size - // is too big. Note: This settings is only supported for the 'yolov5' algorithm. - ModelSize *ModelSize - // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. - Momentum *float32 - - // Enable multi-scale image by varying image size by +/- 50%. Note: training run may get into CUDA OOM if no sufficient GPU - // memory. Note: This settings is only supported for the 'yolov5' algorithm. - MultiScale *bool + Momentum *string // Enable nesterov when optimizer is 'sgd'. 
- Nesterov *bool - - // IOU threshold used during inference in NMS post processing. Must be a float in the range [0, 1]. - NmsIouThreshold *float32 + Nesterov *string // Number of training epochs. Must be a positive integer. - NumberOfEpochs *int32 + NumberOfEpochs *string // Number of data loader workers. Must be a non-negative integer. - NumberOfWorkers *int32 + NumberOfWorkers *string - // Type of optimizer. - Optimizer *StochasticOptimizer + // Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'. + Optimizer *string // Random seed to be used when using deterministic training. - RandomSeed *int32 + RandomSeed *string // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. - StepLRGamma *float32 + StepLRGamma *string // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. - StepLRStepSize *int32 - - // The grid size to use for tiling each image. Note: TileGridSize must not be None to enable small object detection logic. - // A string containing two integers in mxn format. Note: This settings is not - // supported for the 'yolov5' algorithm. - TileGridSize *string - - // Overlap ratio between adjacent tiles in each dimension. Must be float in the range [0, 1). Note: This settings is not supported - // for the 'yolov5' algorithm. - TileOverlapRatio *float32 - - // The IOU threshold to use to perform NMS while merging predictions from tiles and image. Used in validation/ inference. - // Must be float in the range [0, 1]. Note: This settings is not supported for the - // 'yolov5' algorithm. - TilePredictionsNmsThreshold *float32 + StepLRStepSize *string // Training batch size. Must be a positive integer. - TrainingBatchSize *int32 + TrainingBatchSize *string // Validation batch size. Must be a positive integer. - ValidationBatchSize *int32 - - // IOU threshold to use when computing validation metric. Must be float in the range [0, 1]. - ValidationIouThreshold *float32 - - // Metric computation method to use for validation metrics. - ValidationMetricType *ValidationMetricType + ValidationBatchSize *string // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. - WarmupCosineLRCycles *float32 + WarmupCosineLRCycles *string // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. - WarmupCosineLRWarmupEpochs *int32 + WarmupCosineLRWarmupEpochs *string // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. - WeightDecay *float32 + WeightDecay *string } -// ImageObjectDetection - Image Object Detection. Object detection is used to identify objects in an image and locate each -// object with a bounding box e.g. locate all dogs and cats in an image and draw a bounding box around -// each. -type ImageObjectDetection struct { - // REQUIRED; [Required] Limit settings for the AutoML job. 
- LimitSettings *ImageLimitSettings +// ImageModelDistributionSettingsClassification - Distribution expressions to sweep over values of model settings.Some examples +// are: +// ModelName = "choice('seresnext', 'resnest50')"; +// LearningRate = "uniform(0.001, 0.01)"; +// LayersToFreeze = "choice(0, 2)"; +// For more details on how to compose distribution expressions please check the documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters +// For more information on +// the available settings please visit the official documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. +type ImageModelDistributionSettingsClassification struct { + // Enable AMSGrad when optimizer is 'adam' or 'adamw'. + AmsGradient *string - // REQUIRED; [Required] Task type for AutoMLJob. - TaskType *TaskType + // Settings for using Augmentations. + Augmentations *string - // REQUIRED; [Required] Training data input. - TrainingData *MLTableJobInput + // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta1 *string - // Log verbosity for the job. - LogVerbosity *LogVerbosity + // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta2 *string - // Settings used for training the model. - ModelSettings *ImageModelSettingsObjectDetection + // Whether to use distributed training. + Distributed *string - // Primary metric to optimize for this task. - PrimaryMetric *ObjectDetectionPrimaryMetrics + // Enable early stopping logic during training. + EarlyStopping *string - // Search space for sampling different combinations of models and their hyperparameters. - SearchSpace []*ImageModelDistributionSettingsObjectDetection + // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. + // Must be a positive integer. + EarlyStoppingDelay *string - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings + // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be + // a positive integer. + EarlyStoppingPatience *string - // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. - TargetColumnName *string + // Enable normalization when exporting ONNX model. + EnableOnnxNormalization *string - // Validation data inputs. - ValidationData *MLTableJobInput + // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. + EvaluationFrequency *string - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. - ValidationDataSize *float64 -} + // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights + // while accumulating the gradients of those steps, and then using the + // accumulated gradients to compute the weight updates. Must be a positive integer. + GradientAccumulationStep *string -// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageObjectDetection. 
-func (i *ImageObjectDetection) GetAutoMLVertical() *AutoMLVertical { - return &AutoMLVertical{ - LogVerbosity: i.LogVerbosity, - TargetColumnName: i.TargetColumnName, - TaskType: i.TaskType, - TrainingData: i.TrainingData, - } -} + // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' + // means freezing layer0 and layer1. For a full list of models supported and details + // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + LayersToFreeze *string -type ImageObjectDetectionBase struct { - // REQUIRED; [Required] Limit settings for the AutoML job. - LimitSettings *ImageLimitSettings + // Initial learning rate. Must be a float in the range [0, 1]. + LearningRate *string - // Settings used for training the model. - ModelSettings *ImageModelSettingsObjectDetection + // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. + LearningRateScheduler *string - // Search space for sampling different combinations of models and their hyperparameters. - SearchSpace []*ImageModelDistributionSettingsObjectDetection + // Name of the model to use for training. For more information on the available models please visit the official documentation: + // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + ModelName *string - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings + // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. + Momentum *string - // Validation data inputs. - ValidationData *MLTableJobInput + // Enable nesterov when optimizer is 'sgd'. + Nesterov *string - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. - ValidationDataSize *float64 -} + // Number of training epochs. Must be a positive integer. + NumberOfEpochs *string -// ImageSweepSettings - Model sweeping and hyperparameter sweeping related settings. -type ImageSweepSettings struct { - // REQUIRED; [Required] Type of the hyperparameter sampling algorithms. - SamplingAlgorithm *SamplingAlgorithmType + // Number of data loader workers. Must be a non-negative integer. + NumberOfWorkers *string - // Type of early termination policy. - EarlyTermination EarlyTerminationPolicyClassification -} + // Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'. + Optimizer *string -// ImageVertical - Abstract class for AutoML tasks that train image (computer vision) models - such as Image Classification -// / Image Classification Multilabel / Image Object Detection / Image Instance Segmentation. -type ImageVertical struct { - // REQUIRED; [Required] Limit settings for the AutoML job. - LimitSettings *ImageLimitSettings + // Random seed to be used when using deterministic training. + RandomSeed *string - // Model sweeping and hyperparameter sweeping related settings. - SweepSettings *ImageSweepSettings + // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. + StepLRGamma *string - // Validation data inputs. - ValidationData *MLTableJobInput + // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. + StepLRStepSize *string - // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied - // when validation dataset is not provided. 
- ValidationDataSize *float64 -} + // Training batch size. Must be a positive integer. + TrainingBatchSize *string -type InferenceContainerProperties struct { - // The route to check the liveness of the inference server container. - LivenessRoute *Route + // Image crop size that is input to the neural network for the training dataset. Must be a positive integer. + TrainingCropSize *string - // The route to check the readiness of the inference server container. - ReadinessRoute *Route + // Validation batch size. Must be a positive integer. + ValidationBatchSize *string - // The port to send the scoring requests to, within the inference server container. - ScoringRoute *Route -} + // Image crop size that is input to the neural network for the validation dataset. Must be a positive integer. + ValidationCropSize *string -// InstanceTypeSchema - Instance type schema. -type InstanceTypeSchema struct { - // Node Selector - NodeSelector map[string]*string + // Image size to which to resize before cropping for validation dataset. Must be a positive integer. + ValidationResizeSize *string - // Resource requests/limits for this instance type - Resources *InstanceTypeSchemaResources -} + // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. + WarmupCosineLRCycles *string -// InstanceTypeSchemaResources - Resource requests/limits for this instance type -type InstanceTypeSchemaResources struct { - // Resource limits for this instance type - Limits map[string]*string + // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. + WarmupCosineLRWarmupEpochs *string - // Resource requests for this instance type - Requests map[string]*string -} + // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. + WeightDecay *string -// JobBase - Azure Resource Manager resource envelope. -type JobBase struct { - // REQUIRED; [Required] Additional attributes of the entity. - Properties JobBasePropertiesClassification + // Weighted loss. The accepted values are 0 for no weighted loss. 1 for weighted loss with sqrt.(classweights). 2 for weighted + // loss with classweights. Must be 0 or 1 or 2. + WeightedLoss *string +} - // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string +// ImageModelDistributionSettingsObjectDetection - Distribution expressions to sweep over values of model settings.Some examples +// are: +// ModelName = "choice('seresnext', 'resnest50')"; +// LearningRate = "uniform(0.001, 0.01)"; +// LayersToFreeze = "choice(0, 2)"; +// For more details on how to compose distribution expressions please check the documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters +// For more information on +// the available settings please visit the official documentation: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. +type ImageModelDistributionSettingsObjectDetection struct { + // Enable AMSGrad when optimizer is 'adam' or 'adamw'. + AmsGradient *string - // READ-ONLY; The name of the resource - Name *string + // Settings for using Augmentations. + Augmentations *string - // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. 
- SystemData *SystemData + // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta1 *string - // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string -} + // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta2 *string -// JobBasePropertiesClassification provides polymorphic access to related types. -// Call the interface's GetJobBaseProperties() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *AutoMLJob, *CommandJob, *JobBaseProperties, *PipelineJob, *SweepJob -type JobBasePropertiesClassification interface { - // GetJobBaseProperties returns the JobBaseProperties content of the underlying type. - GetJobBaseProperties() *JobBaseProperties -} + // Maximum number of detections per image, for all classes. Must be a positive integer. Note: This settings is not supported + // for the 'yolov5' algorithm. + BoxDetectionsPerImage *string -// JobBaseProperties - Base definition for a job. -type JobBaseProperties struct { - // REQUIRED; [Required] Specifies the type of job. - JobType *JobType + // During inference, only return proposals with a classification score greater than BoxScoreThreshold. Must be a float in + // the range[0, 1]. + BoxScoreThreshold *string - // ARM resource ID of the component resource. - ComponentID *string + // Whether to use distributed training. + Distributed *string - // ARM resource ID of the compute resource. - ComputeID *string + // Enable early stopping logic during training. + EarlyStopping *string - // The asset description text. - Description *string + // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. + // Must be a positive integer. + EarlyStoppingDelay *string - // Display name of job. - DisplayName *string + // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be + // a positive integer. + EarlyStoppingPatience *string - // The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment. - ExperimentName *string + // Enable normalization when exporting ONNX model. + EnableOnnxNormalization *string - // Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken - // if null. - Identity IdentityConfigurationClassification + // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. + EvaluationFrequency *string - // Is the asset archived? - IsArchived *bool + // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights + // while accumulating the gradients of those steps, and then using the + // accumulated gradients to compute the weight updates. Must be a positive integer. + GradientAccumulationStep *string - // The asset property dictionary. - Properties map[string]*string + // Image size for train and validation. Must be a positive integer. Note: The training run may get into CUDA OOM if the size + // is too big. Note: This settings is only supported for the 'yolov5' algorithm. + ImageSize *string - // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. 
- Services map[string]*JobService + // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' + // means freezing layer0 and layer1. For a full list of models supported and details + // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + LayersToFreeze *string - // Tag dictionary. Tags can be added, removed, and updated. - Tags map[string]*string + // Initial learning rate. Must be a float in the range [0, 1]. + LearningRate *string - // READ-ONLY; Status of the job. - Status *JobStatus -} + // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. + LearningRateScheduler *string -// GetJobBaseProperties implements the JobBasePropertiesClassification interface for type JobBaseProperties. -func (j *JobBaseProperties) GetJobBaseProperties() *JobBaseProperties { return j } + // Maximum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training + // run may get into CUDA OOM if the size is too big. Note: This settings is not + // supported for the 'yolov5' algorithm. + MaxSize *string -// JobBaseResourceArmPaginatedResult - A paginated list of JobBase entities. -type JobBaseResourceArmPaginatedResult struct { - // The link to the next page of JobBase objects. If null, there are no additional pages. - NextLink *string + // Minimum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training + // run may get into CUDA OOM if the size is too big. Note: This settings is not + // supported for the 'yolov5' algorithm. + MinSize *string + + // Name of the model to use for training. For more information on the available models please visit the official documentation: + // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + ModelName *string + + // Model size. Must be 'small', 'medium', 'large', or 'xlarge'. Note: training run may get into CUDA OOM if the model size + // is too big. Note: This settings is only supported for the 'yolov5' algorithm. + ModelSize *string + + // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. + Momentum *string + + // Enable multi-scale image by varying image size by +/- 50%. Note: training run may get into CUDA OOM if no sufficient GPU + // memory. Note: This settings is only supported for the 'yolov5' algorithm. + MultiScale *string + + // Enable nesterov when optimizer is 'sgd'. + Nesterov *string + + // IOU threshold used during inference in NMS post processing. Must be float in the range [0, 1]. + NmsIouThreshold *string + + // Number of training epochs. Must be a positive integer. + NumberOfEpochs *string + + // Number of data loader workers. Must be a non-negative integer. + NumberOfWorkers *string + + // Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'. + Optimizer *string + + // Random seed to be used when using deterministic training. + RandomSeed *string + + // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. + StepLRGamma *string + + // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. + StepLRStepSize *string + + // The grid size to use for tiling each image. Note: TileGridSize must not be None to enable small object detection logic. + // A string containing two integers in mxn format. Note: This settings is not + // supported for the 'yolov5' algorithm. 
+ TileGridSize *string + + // Overlap ratio between adjacent tiles in each dimension. Must be float in the range [0, 1). Note: This settings is not supported + // for the 'yolov5' algorithm. + TileOverlapRatio *string + + // The IOU threshold to use to perform NMS while merging predictions from tiles and image. Used in validation/ inference. + // Must be float in the range [0, 1]. Note: This settings is not supported for the + // 'yolov5' algorithm. NMS: Non-maximum suppression + TilePredictionsNmsThreshold *string + + // Training batch size. Must be a positive integer. + TrainingBatchSize *string + + // Validation batch size. Must be a positive integer. + ValidationBatchSize *string + + // IOU threshold to use when computing validation metric. Must be float in the range [0, 1]. + ValidationIouThreshold *string + + // Metric computation method to use for validation metrics. Must be 'none', 'coco', 'voc', or 'coco_voc'. + ValidationMetricType *string + + // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. + WarmupCosineLRCycles *string + + // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. + WarmupCosineLRWarmupEpochs *string + + // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. + WeightDecay *string +} + +// ImageModelSettings - Settings used for training the model. For more information on the available settings please visit +// the official documentation: +// https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. +type ImageModelSettings struct { + // Settings for advanced scenarios. + AdvancedSettings *string + + // Enable AMSGrad when optimizer is 'adam' or 'adamw'. + AmsGradient *bool + + // Settings for using Augmentations. + Augmentations *string + + // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta1 *float32 + + // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta2 *float32 + + // Frequency to store model checkpoints. Must be a positive integer. + CheckpointFrequency *int32 + + // The pretrained checkpoint model for incremental training. + CheckpointModel *MLFlowModelJobInput + + // The id of a previous run that has a pretrained checkpoint for incremental training. + CheckpointRunID *string + + // Whether to use distributed training. + Distributed *bool + + // Enable early stopping logic during training. + EarlyStopping *bool + + // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. + // Must be a positive integer. + EarlyStoppingDelay *int32 + + // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be + // a positive integer. + EarlyStoppingPatience *int32 + + // Enable normalization when exporting ONNX model. + EnableOnnxNormalization *bool + + // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. + EvaluationFrequency *int32 + + // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights + // while accumulating the gradients of those steps, and then using the + // accumulated gradients to compute the weight updates. Must be a positive integer. + GradientAccumulationStep *int32 + + // Number of layers to freeze for the model. 
Must be a positive integer. For instance, passing 2 as value for 'seresnext' + // means freezing layer0 and layer1. For a full list of models supported and details + // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + LayersToFreeze *int32 + + // Initial learning rate. Must be a float in the range [0, 1]. + LearningRate *float32 + + // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. + LearningRateScheduler *LearningRateScheduler + + // Name of the model to use for training. For more information on the available models please visit the official documentation: + // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + ModelName *string + + // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. + Momentum *float32 + + // Enable nesterov when optimizer is 'sgd'. + Nesterov *bool + + // Number of training epochs. Must be a positive integer. + NumberOfEpochs *int32 + + // Number of data loader workers. Must be a non-negative integer. + NumberOfWorkers *int32 + + // Type of optimizer. + Optimizer *StochasticOptimizer + + // Random seed to be used when using deterministic training. + RandomSeed *int32 + + // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. + StepLRGamma *float32 + + // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. + StepLRStepSize *int32 + + // Training batch size. Must be a positive integer. + TrainingBatchSize *int32 + + // Validation batch size. Must be a positive integer. + ValidationBatchSize *int32 + + // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. + WarmupCosineLRCycles *float32 + + // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. + WarmupCosineLRWarmupEpochs *int32 + + // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. + WeightDecay *float32 +} + +// ImageModelSettingsClassification - Settings used for training the model. For more information on the available settings +// please visit the official documentation: +// https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. +type ImageModelSettingsClassification struct { + // Settings for advanced scenarios. + AdvancedSettings *string + + // Enable AMSGrad when optimizer is 'adam' or 'adamw'. + AmsGradient *bool + + // Settings for using Augmentations. + Augmentations *string + + // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta1 *float32 + + // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta2 *float32 + + // Frequency to store model checkpoints. Must be a positive integer. + CheckpointFrequency *int32 + + // The pretrained checkpoint model for incremental training. + CheckpointModel *MLFlowModelJobInput + + // The id of a previous run that has a pretrained checkpoint for incremental training. + CheckpointRunID *string + + // Whether to use distributed training. + Distributed *bool + + // Enable early stopping logic during training. + EarlyStopping *bool + + // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. + // Must be a positive integer. 
+ EarlyStoppingDelay *int32 + + // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be + // a positive integer. + EarlyStoppingPatience *int32 + + // Enable normalization when exporting ONNX model. + EnableOnnxNormalization *bool + + // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. + EvaluationFrequency *int32 + + // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights + // while accumulating the gradients of those steps, and then using the + // accumulated gradients to compute the weight updates. Must be a positive integer. + GradientAccumulationStep *int32 + + // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' + // means freezing layer0 and layer1. For a full list of models supported and details + // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + LayersToFreeze *int32 + + // Initial learning rate. Must be a float in the range [0, 1]. + LearningRate *float32 + + // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. + LearningRateScheduler *LearningRateScheduler + + // Name of the model to use for training. For more information on the available models please visit the official documentation: + // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + ModelName *string + + // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. + Momentum *float32 + + // Enable nesterov when optimizer is 'sgd'. + Nesterov *bool + + // Number of training epochs. Must be a positive integer. + NumberOfEpochs *int32 + + // Number of data loader workers. Must be a non-negative integer. + NumberOfWorkers *int32 + + // Type of optimizer. + Optimizer *StochasticOptimizer + + // Random seed to be used when using deterministic training. + RandomSeed *int32 + + // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. + StepLRGamma *float32 + + // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. + StepLRStepSize *int32 + + // Training batch size. Must be a positive integer. + TrainingBatchSize *int32 + + // Image crop size that is input to the neural network for the training dataset. Must be a positive integer. + TrainingCropSize *int32 + + // Validation batch size. Must be a positive integer. + ValidationBatchSize *int32 + + // Image crop size that is input to the neural network for the validation dataset. Must be a positive integer. + ValidationCropSize *int32 + + // Image size to which to resize before cropping for validation dataset. Must be a positive integer. + ValidationResizeSize *int32 + + // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. + WarmupCosineLRCycles *float32 + + // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. + WarmupCosineLRWarmupEpochs *int32 + + // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. + WeightDecay *float32 + + // Weighted loss. The accepted values are 0 for no weighted loss. 1 for weighted loss with sqrt.(classweights). 2 for weighted + // loss with classweights. Must be 0 or 1 or 2. 
+ WeightedLoss *int32 +} + +// ImageModelSettingsObjectDetection - Settings used for training the model. For more information on the available settings +// please visit the official documentation: +// https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. +type ImageModelSettingsObjectDetection struct { + // Settings for advanced scenarios. + AdvancedSettings *string + + // Enable AMSGrad when optimizer is 'adam' or 'adamw'. + AmsGradient *bool + + // Settings for using Augmentations. + Augmentations *string + + // Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta1 *float32 + + // Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range [0, 1]. + Beta2 *float32 + + // Maximum number of detections per image, for all classes. Must be a positive integer. Note: This settings is not supported + // for the 'yolov5' algorithm. + BoxDetectionsPerImage *int32 + + // During inference, only return proposals with a classification score greater than BoxScoreThreshold. Must be a float in + // the range[0, 1]. + BoxScoreThreshold *float32 + + // Frequency to store model checkpoints. Must be a positive integer. + CheckpointFrequency *int32 + + // The pretrained checkpoint model for incremental training. + CheckpointModel *MLFlowModelJobInput + + // The id of a previous run that has a pretrained checkpoint for incremental training. + CheckpointRunID *string + + // Whether to use distributed training. + Distributed *bool + + // Enable early stopping logic during training. + EarlyStopping *bool + + // Minimum number of epochs or validation evaluations to wait before primary metric improvement is tracked for early stopping. + // Must be a positive integer. + EarlyStoppingDelay *int32 + + // Minimum number of epochs or validation evaluations with no primary metric improvement before the run is stopped. Must be + // a positive integer. + EarlyStoppingPatience *int32 + + // Enable normalization when exporting ONNX model. + EnableOnnxNormalization *bool + + // Frequency to evaluate validation dataset to get metric scores. Must be a positive integer. + EvaluationFrequency *int32 + + // Gradient accumulation means running a configured number of "GradAccumulationStep" steps without updating the model weights + // while accumulating the gradients of those steps, and then using the + // accumulated gradients to compute the weight updates. Must be a positive integer. + GradientAccumulationStep *int32 + + // Image size for train and validation. Must be a positive integer. Note: The training run may get into CUDA OOM if the size + // is too big. Note: This settings is only supported for the 'yolov5' algorithm. + ImageSize *int32 + + // Number of layers to freeze for the model. Must be a positive integer. For instance, passing 2 as value for 'seresnext' + // means freezing layer0 and layer1. For a full list of models supported and details + // on layer freeze, please see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + LayersToFreeze *int32 + + // Initial learning rate. Must be a float in the range [0, 1]. + LearningRate *float32 + + // Type of learning rate scheduler. Must be 'warmup_cosine' or 'step'. + LearningRateScheduler *LearningRateScheduler + + // Enable computing and logging training metrics. + LogTrainingMetrics *LogTrainingMetrics + + // Enable computing and logging validation loss. 
+ LogValidationLoss *LogValidationLoss + + // Maximum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training + // run may get into CUDA OOM if the size is too big. Note: This settings is not + // supported for the 'yolov5' algorithm. + MaxSize *int32 + + // Minimum size of the image to be rescaled before feeding it to the backbone. Must be a positive integer. Note: training + // run may get into CUDA OOM if the size is too big. Note: This settings is not + // supported for the 'yolov5' algorithm. + MinSize *int32 + + // Name of the model to use for training. For more information on the available models please visit the official documentation: + // https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models. + ModelName *string + + // Model size. Must be 'small', 'medium', 'large', or 'xlarge'. Note: training run may get into CUDA OOM if the model size + // is too big. Note: This settings is only supported for the 'yolov5' algorithm. + ModelSize *ModelSize + + // Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1]. + Momentum *float32 + + // Enable multi-scale image by varying image size by +/- 50%. Note: training run may get into CUDA OOM if no sufficient GPU + // memory. Note: This settings is only supported for the 'yolov5' algorithm. + MultiScale *bool + + // Enable nesterov when optimizer is 'sgd'. + Nesterov *bool + + // IOU threshold used during inference in NMS post processing. Must be a float in the range [0, 1]. + NmsIouThreshold *float32 + + // Number of training epochs. Must be a positive integer. + NumberOfEpochs *int32 + + // Number of data loader workers. Must be a non-negative integer. + NumberOfWorkers *int32 + + // Type of optimizer. + Optimizer *StochasticOptimizer + + // Random seed to be used when using deterministic training. + RandomSeed *int32 + + // Value of gamma when learning rate scheduler is 'step'. Must be a float in the range [0, 1]. + StepLRGamma *float32 + + // Value of step size when learning rate scheduler is 'step'. Must be a positive integer. + StepLRStepSize *int32 + + // The grid size to use for tiling each image. Note: TileGridSize must not be None to enable small object detection logic. + // A string containing two integers in mxn format. Note: This settings is not + // supported for the 'yolov5' algorithm. + TileGridSize *string + + // Overlap ratio between adjacent tiles in each dimension. Must be float in the range [0, 1). Note: This settings is not supported + // for the 'yolov5' algorithm. + TileOverlapRatio *float32 + + // The IOU threshold to use to perform NMS while merging predictions from tiles and image. Used in validation/ inference. + // Must be float in the range [0, 1]. Note: This settings is not supported for the + // 'yolov5' algorithm. + TilePredictionsNmsThreshold *float32 + + // Training batch size. Must be a positive integer. + TrainingBatchSize *int32 + + // Validation batch size. Must be a positive integer. + ValidationBatchSize *int32 + + // IOU threshold to use when computing validation metric. Must be float in the range [0, 1]. + ValidationIouThreshold *float32 + + // Metric computation method to use for validation metrics. + ValidationMetricType *ValidationMetricType + + // Value of cosine cycle when learning rate scheduler is 'warmup_cosine'. Must be a float in the range [0, 1]. + WarmupCosineLRCycles *float32 + + // Value of warmup epochs when learning rate scheduler is 'warmup_cosine'. Must be a positive integer. 
+ WarmupCosineLRWarmupEpochs *int32 + + // Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be a float in the range[0, 1]. + WeightDecay *float32 +} + +// ImageObjectDetection - Image Object Detection. Object detection is used to identify objects in an image and locate each +// object with a bounding box e.g. locate all dogs and cats in an image and draw a bounding box around +// each. +type ImageObjectDetection struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings + + // REQUIRED; [Required] Task type for AutoMLJob. + TaskType *TaskType + + // REQUIRED; [Required] Training data input. + TrainingData *MLTableJobInput + + // Log verbosity for the job. + LogVerbosity *LogVerbosity + + // Settings used for training the model. + ModelSettings *ImageModelSettingsObjectDetection + + // Primary metric to optimize for this task. + PrimaryMetric *ObjectDetectionPrimaryMetrics + + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*ImageModelDistributionSettingsObjectDetection + + // Model sweeping and hyperparameter sweeping related settings. + SweepSettings *ImageSweepSettings + + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. + TargetColumnName *string + + // Validation data inputs. + ValidationData *MLTableJobInput + + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 +} + +// GetAutoMLVertical implements the AutoMLVerticalClassification interface for type ImageObjectDetection. +func (i *ImageObjectDetection) GetAutoMLVertical() *AutoMLVertical { + return &AutoMLVertical{ + LogVerbosity: i.LogVerbosity, + TargetColumnName: i.TargetColumnName, + TaskType: i.TaskType, + TrainingData: i.TrainingData, + } +} + +type ImageObjectDetectionBase struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings + + // Settings used for training the model. + ModelSettings *ImageModelSettingsObjectDetection + + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*ImageModelDistributionSettingsObjectDetection + + // Model sweeping and hyperparameter sweeping related settings. + SweepSettings *ImageSweepSettings + + // Validation data inputs. + ValidationData *MLTableJobInput + + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 +} + +// ImageSweepSettings - Model sweeping and hyperparameter sweeping related settings. +type ImageSweepSettings struct { + // REQUIRED; [Required] Type of the hyperparameter sampling algorithms. + SamplingAlgorithm *SamplingAlgorithmType + + // Type of early termination policy. + EarlyTermination EarlyTerminationPolicyClassification +} + +// ImageVertical - Abstract class for AutoML tasks that train image (computer vision) models - such as Image Classification +// / Image Classification Multilabel / Image Object Detection / Image Instance Segmentation. +type ImageVertical struct { + // REQUIRED; [Required] Limit settings for the AutoML job. + LimitSettings *ImageLimitSettings + + // Model sweeping and hyperparameter sweeping related settings. 
+ SweepSettings *ImageSweepSettings + + // Validation data inputs. + ValidationData *MLTableJobInput + + // The fraction of training dataset that needs to be set aside for validation purpose. Values between (0.0 , 1.0) Applied + // when validation dataset is not provided. + ValidationDataSize *float64 +} + +type ImportDataAction struct { + // REQUIRED; [Required] Specifies the action type of the schedule + ActionType *ScheduleActionType + + // REQUIRED; [Required] Defines Schedule action definition details. + DataImportDefinition *DataImport +} + +// GetScheduleActionBase implements the ScheduleActionBaseClassification interface for type ImportDataAction. +func (i *ImportDataAction) GetScheduleActionBase() *ScheduleActionBase { + return &ScheduleActionBase{ + ActionType: i.ActionType, + } +} + +// IndexColumn - Dto object representing index column +type IndexColumn struct { + // Specifies the column name + ColumnName *string + + // Specifies the data type + DataType *FeatureDataType +} + +type InferenceContainerProperties struct { + // The route to check the liveness of the inference server container. + LivenessRoute *Route + + // The route to check the readiness of the inference server container. + ReadinessRoute *Route + + // The port to send the scoring requests to, within the inference server container. + ScoringRoute *Route +} + +// InferencingServerClassification provides polymorphic access to related types. +// Call the interface's GetInferencingServer() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AzureMLBatchInferencingServer, *AzureMLOnlineInferencingServer, *CustomInferencingServer, *InferencingServer, *TritonInferencingServer +type InferencingServerClassification interface { + // GetInferencingServer returns the InferencingServer content of the underlying type. + GetInferencingServer() *InferencingServer +} + +type InferencingServer struct { + // REQUIRED; [Required] Inferencing server type for various targets. + ServerType *InferencingServerType +} + +// GetInferencingServer implements the InferencingServerClassification interface for type InferencingServer. +func (i *InferencingServer) GetInferencingServer() *InferencingServer { return i } + +// InstanceTypeSchema - Instance type schema. +type InstanceTypeSchema struct { + // Node Selector + NodeSelector map[string]*string + + // Resource requests/limits for this instance type + Resources *InstanceTypeSchemaResources +} + +// InstanceTypeSchemaResources - Resource requests/limits for this instance type +type InstanceTypeSchemaResources struct { + // Resource limits for this instance type + Limits map[string]*string + + // Resource requests for this instance type + Requests map[string]*string +} + +// IntellectualProperty - Intellectual Property details for a resource. +type IntellectualProperty struct { + // REQUIRED; [Required] Publisher of the Intellectual Property. Must be the same as Registry publisher name. + Publisher *string + + // Protection level of the Intellectual Property. + ProtectionLevel *ProtectionLevel +} + +// JobBase - Azure Resource Manager resource envelope. +type JobBase struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties JobBasePropertiesClassification + + // READ-ONLY; Fully qualified resource ID for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// JobBasePropertiesClassification provides polymorphic access to related types. +// Call the interface's GetJobBaseProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AutoMLJob, *CommandJob, *JobBaseProperties, *LabelingJobProperties, *PipelineJob, *SparkJob, *SweepJob +type JobBasePropertiesClassification interface { + // GetJobBaseProperties returns the JobBaseProperties content of the underlying type. + GetJobBaseProperties() *JobBaseProperties +} + +// JobBaseProperties - Base definition for a job. +type JobBaseProperties struct { + // REQUIRED; [Required] Specifies the type of job. + JobType *JobType + + // ARM resource ID of the component resource. + ComponentID *string + + // ARM resource ID of the compute resource. + ComputeID *string + + // The asset description text. + Description *string + + // Display name of job. + DisplayName *string + + // The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment. + ExperimentName *string + + // Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken + // if null. + Identity IdentityConfigurationClassification + + // Is the asset archived? + IsArchived *bool + + // Notification setting for the job + NotificationSetting *NotificationSetting + + // The asset property dictionary. + Properties map[string]*string + + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration + + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. + Services map[string]*JobService + + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string + + // READ-ONLY; Status of the job. + Status *JobStatus +} + +// GetJobBaseProperties implements the JobBasePropertiesClassification interface for type JobBaseProperties. +func (j *JobBaseProperties) GetJobBaseProperties() *JobBaseProperties { return j } + +// JobBaseResourceArmPaginatedResult - A paginated list of JobBase entities. +type JobBaseResourceArmPaginatedResult struct { + // The link to the next page of JobBase objects. If null, there are no additional pages. + NextLink *string // An array of objects of type JobBase. Value []*JobBase } -// JobInputClassification provides polymorphic access to related types. -// Call the interface's GetJobInput() method to access the common type. +// JobInputClassification provides polymorphic access to related types. +// Call the interface's GetJobInput() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CustomModelJobInput, *JobInput, *LiteralJobInput, *MLFlowModelJobInput, *MLTableJobInput, *TritonModelJobInput, *URIFileJobInput, +// - *URIFolderJobInput +type JobInputClassification interface { + // GetJobInput returns the JobInput content of the underlying type. 
+ GetJobInput() *JobInput +} + +// JobInput - Command job definition. +type JobInput struct { + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // Description for the input. + Description *string +} + +// GetJobInput implements the JobInputClassification interface for type JobInput. +func (j *JobInput) GetJobInput() *JobInput { return j } + +// JobLimitsClassification provides polymorphic access to related types. +// Call the interface's GetJobLimits() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CommandJobLimits, *JobLimits, *SweepJobLimits +type JobLimitsClassification interface { + // GetJobLimits returns the JobLimits content of the underlying type. + GetJobLimits() *JobLimits +} + +type JobLimits struct { + // REQUIRED; [Required] JobLimit type. + JobLimitsType *JobLimitsType + + // The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as + // low as Seconds. + Timeout *string +} + +// GetJobLimits implements the JobLimitsClassification interface for type JobLimits. +func (j *JobLimits) GetJobLimits() *JobLimits { return j } + +// JobOutputClassification provides polymorphic access to related types. +// Call the interface's GetJobOutput() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CustomModelJobOutput, *JobOutput, *MLFlowModelJobOutput, *MLTableJobOutput, *TritonModelJobOutput, *URIFileJobOutput, +// - *URIFolderJobOutput +type JobOutputClassification interface { + // GetJobOutput returns the JobOutput content of the underlying type. + GetJobOutput() *JobOutput +} + +// JobOutput - Job output definition container information on where to find job output/logs. +type JobOutput struct { + // REQUIRED; [Required] Specifies the type of job. + JobOutputType *JobOutputType + + // Description for the output. + Description *string +} + +// GetJobOutput implements the JobOutputClassification interface for type JobOutput. +func (j *JobOutput) GetJobOutput() *JobOutput { return j } + +type JobResourceConfiguration struct { + // Extra arguments to pass to the Docker run command. This would override any parameters that have already been set by the + // system, or in this section. This parameter is only supported for Azure ML + // compute types. + DockerArgs *string + + // Optional number of instances or nodes used by the compute target. + InstanceCount *int32 + + // Optional type of VM used as supported by the compute target. + InstanceType *string + + // Locations where the job can run. + Locations []*string + + // Optional max allowed number of instances or nodes to be used by the compute target. For use with elastic training, currently + // supported by PyTorch distribution type only. + MaxInstanceCount *int32 + + // Additional properties bag. + Properties map[string]any + + // Size of the docker container's shared memory block. This should be in the format of (number)(unit) where number as to be + // greater than 0 and the unit can be one of b(bytes), k(kilobytes), m(megabytes), + // or g(gigabytes). + ShmSize *string +} + +type JobScheduleAction struct { + // REQUIRED; [Required] Specifies the action type of the schedule + ActionType *ScheduleActionType + + // REQUIRED; [Required] Defines Schedule action definition details. 
+ JobDefinition JobBasePropertiesClassification +} + +// GetScheduleActionBase implements the ScheduleActionBaseClassification interface for type JobScheduleAction. +func (j *JobScheduleAction) GetScheduleActionBase() *ScheduleActionBase { + return &ScheduleActionBase{ + ActionType: j.ActionType, + } +} + +// JobService - Job endpoint definition +type JobService struct { + // Url for endpoint. + Endpoint *string + + // Endpoint type. + JobServiceType *string + + // Nodes that user would like to start the service on. If Nodes is not set or set to null, the service will only be started + // on leader node. + Nodes NodesClassification + + // Port for endpoint set by user. + Port *int32 + + // Additional properties to set on the endpoint. + Properties map[string]*string + + // READ-ONLY; Any error in the service. + ErrorMessage *string + + // READ-ONLY; Status of endpoint. + Status *string +} + +// JobsClientBeginCancelOptions contains the optional parameters for the JobsClient.BeginCancel method. +type JobsClientBeginCancelOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// JobsClientBeginDeleteOptions contains the optional parameters for the JobsClient.BeginDelete method. +type JobsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// JobsClientCreateOrUpdateOptions contains the optional parameters for the JobsClient.CreateOrUpdate method. +type JobsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// JobsClientGetOptions contains the optional parameters for the JobsClient.Get method. +type JobsClientGetOptions struct { + // placeholder for future optional parameters +} + +// JobsClientListOptions contains the optional parameters for the JobsClient.NewListPager method. +type JobsClientListOptions struct { + // Asset name the job's named output is registered with + AssetName *string + // Type of job to be returned. + JobType *string + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // The scheduled id for listing the job triggered from + ScheduleID *string + // Indicator whether the job is scheduled job. + Scheduled *bool + // Continuation token for pagination. + Skip *string + // Jobs returned will have this tag key. + Tag *string +} + +// JobsClientUpdateOptions contains the optional parameters for the JobsClient.Update method. +type JobsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +type KerberosCredentials struct { + // REQUIRED; [Required] IP Address or DNS HostName. + KerberosKdcAddress *string + + // REQUIRED; [Required] Kerberos Username + KerberosPrincipal *string + + // REQUIRED; [Required] Domain over which a Kerberos authentication server has the authority to authenticate a user, host + // or service. + KerberosRealm *string +} + +type KerberosKeytabCredentials struct { + // REQUIRED; [Required] Credential type used to authentication with storage. + CredentialsType *CredentialsType + + // REQUIRED; [Required] IP Address or DNS HostName. + KerberosKdcAddress *string + + // REQUIRED; [Required] Kerberos Username + KerberosPrincipal *string + + // REQUIRED; [Required] Domain over which a Kerberos authentication server has the authority to authenticate a user, host + // or service. + KerberosRealm *string + + // REQUIRED; [Required] Keytab secrets. 
+ Secrets *KerberosKeytabSecrets +} + +// GetDatastoreCredentials implements the DatastoreCredentialsClassification interface for type KerberosKeytabCredentials. +func (k *KerberosKeytabCredentials) GetDatastoreCredentials() *DatastoreCredentials { + return &DatastoreCredentials{ + CredentialsType: k.CredentialsType, + } +} + +type KerberosKeytabSecrets struct { + // REQUIRED; [Required] Credential type used to authentication with storage. + SecretsType *SecretsType + + // Kerberos keytab secret. + KerberosKeytab *string +} + +// GetDatastoreSecrets implements the DatastoreSecretsClassification interface for type KerberosKeytabSecrets. +func (k *KerberosKeytabSecrets) GetDatastoreSecrets() *DatastoreSecrets { + return &DatastoreSecrets{ + SecretsType: k.SecretsType, + } +} + +type KerberosPasswordCredentials struct { + // REQUIRED; [Required] Credential type used to authentication with storage. + CredentialsType *CredentialsType + + // REQUIRED; [Required] IP Address or DNS HostName. + KerberosKdcAddress *string + + // REQUIRED; [Required] Kerberos Username + KerberosPrincipal *string + + // REQUIRED; [Required] Domain over which a Kerberos authentication server has the authority to authenticate a user, host + // or service. + KerberosRealm *string + + // REQUIRED; [Required] Kerberos password secrets. + Secrets *KerberosPasswordSecrets +} + +// GetDatastoreCredentials implements the DatastoreCredentialsClassification interface for type KerberosPasswordCredentials. +func (k *KerberosPasswordCredentials) GetDatastoreCredentials() *DatastoreCredentials { + return &DatastoreCredentials{ + CredentialsType: k.CredentialsType, + } +} + +type KerberosPasswordSecrets struct { + // REQUIRED; [Required] Credential type used to authentication with storage. + SecretsType *SecretsType + + // Kerberos password secret. + KerberosPassword *string +} + +// GetDatastoreSecrets implements the DatastoreSecretsClassification interface for type KerberosPasswordSecrets. +func (k *KerberosPasswordSecrets) GetDatastoreSecrets() *DatastoreSecrets { + return &DatastoreSecrets{ + SecretsType: k.SecretsType, + } +} + +// KeyVaultProperties - Customer Key vault properties. +type KeyVaultProperties struct { + // REQUIRED; KeyVault key identifier to encrypt the data + KeyIdentifier *string + + // REQUIRED; KeyVault Arm Id that contains the data encryption key + KeyVaultArmID *string + + // Currently, we support only SystemAssigned MSI. We need this when we support UserAssignedIdentities + IdentityClientID *string +} + +// Kubernetes - A Machine Learning compute based on Kubernetes Compute. +type Kubernetes struct { + // REQUIRED; The type of compute + ComputeType *ComputeType + + // Location for the underlying compute + ComputeLocation *string + + // The description of the Machine Learning compute. + Description *string + + // Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication. + DisableLocalAuth *bool + + // Properties of Kubernetes + Properties *KubernetesProperties + + // ARM resource id of the underlying compute + ResourceID *string + + // READ-ONLY; The time at which the compute was created. + CreatedOn *time.Time + + // READ-ONLY; Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning + // service provisioned it if false. + IsAttachedCompute *bool + + // READ-ONLY; The time at which the compute was last modified. 
+ ModifiedOn *time.Time + + // READ-ONLY; Errors during provisioning + ProvisioningErrors []*ErrorResponse + + // READ-ONLY; The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed. + ProvisioningState *ProvisioningState +} + +// GetCompute implements the ComputeClassification interface for type Kubernetes. +func (k *Kubernetes) GetCompute() *Compute { + return &Compute{ + ComputeType: k.ComputeType, + ComputeLocation: k.ComputeLocation, + ProvisioningState: k.ProvisioningState, + Description: k.Description, + CreatedOn: k.CreatedOn, + ModifiedOn: k.ModifiedOn, + ResourceID: k.ResourceID, + ProvisioningErrors: k.ProvisioningErrors, + IsAttachedCompute: k.IsAttachedCompute, + DisableLocalAuth: k.DisableLocalAuth, + } +} + +// KubernetesOnlineDeployment - Properties specific to a KubernetesOnlineDeployment. +type KubernetesOnlineDeployment struct { + // REQUIRED; [Required] The compute type of the endpoint. + EndpointComputeType *EndpointComputeType + + // If true, enables Application Insights logging. + AppInsightsEnabled *bool + + // Code configuration for the endpoint deployment. + CodeConfiguration *CodeConfiguration + + // The resource requirements for the container (cpu and memory). + ContainerResourceRequirements *ContainerResourceRequirements + + // The mdc configuration, we disable mdc when it's null. + DataCollector *DataCollector + + // Description of the endpoint deployment. + Description *string + + // If Enabled, allow egress public network access. If Disabled, this will create secure egress. Default: Enabled. + EgressPublicNetworkAccess *EgressPublicNetworkAccessType + + // ARM resource ID of the environment specification for the endpoint deployment. + EnvironmentID *string + + // Environment variables configuration for the deployment. + EnvironmentVariables map[string]*string + + // Compute instance type. + InstanceType *string + + // Liveness probe monitors the health of the container regularly. + LivenessProbe *ProbeSettings + + // The URI path to the model. + Model *string + + // The path to mount the model in custom container. + ModelMountPath *string + + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string + + // Readiness probe validates if the container is ready to serve traffic. The properties and defaults are the same as liveness + // probe. + ReadinessProbe *ProbeSettings + + // Request settings for the deployment. + RequestSettings *OnlineRequestSettings + + // Scale settings for the deployment. If it is null or not provided, it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment + // and to DefaultScaleSettings for ManagedOnlineDeployment. + ScaleSettings OnlineScaleSettingsClassification + + // READ-ONLY; Provisioning state for the endpoint deployment. + ProvisioningState *DeploymentProvisioningState +} + +// GetOnlineDeploymentProperties implements the OnlineDeploymentPropertiesClassification interface for type KubernetesOnlineDeployment. 
+func (k *KubernetesOnlineDeployment) GetOnlineDeploymentProperties() *OnlineDeploymentProperties { + return &OnlineDeploymentProperties{ + AppInsightsEnabled: k.AppInsightsEnabled, + DataCollector: k.DataCollector, + EgressPublicNetworkAccess: k.EgressPublicNetworkAccess, + EndpointComputeType: k.EndpointComputeType, + InstanceType: k.InstanceType, + LivenessProbe: k.LivenessProbe, + Model: k.Model, + ModelMountPath: k.ModelMountPath, + ProvisioningState: k.ProvisioningState, + ReadinessProbe: k.ReadinessProbe, + RequestSettings: k.RequestSettings, + ScaleSettings: k.ScaleSettings, + CodeConfiguration: k.CodeConfiguration, + Description: k.Description, + EnvironmentID: k.EnvironmentID, + EnvironmentVariables: k.EnvironmentVariables, + Properties: k.Properties, + } +} + +// KubernetesProperties - Kubernetes properties +type KubernetesProperties struct { + // Default instance type + DefaultInstanceType *string + + // Extension instance release train. + ExtensionInstanceReleaseTrain *string + + // Extension principal-id. + ExtensionPrincipalID *string + + // Instance Type Schema + InstanceTypes map[string]*InstanceTypeSchema + + // Compute namespace + Namespace *string + + // Relay connection string. + RelayConnectionString *string + + // ServiceBus connection string. + ServiceBusConnectionString *string + + // VC name. + VcName *string +} + +// KubernetesSchema - Kubernetes Compute Schema +type KubernetesSchema struct { + // Properties of Kubernetes + Properties *KubernetesProperties +} + +// LabelCategory - Label category definition +type LabelCategory struct { + // Dictionary of label classes in this category. + Classes map[string]*LabelClass + + // Display name of the label category. + DisplayName *string + + // Indicates whether it is allowed to select multiple classes in this category. + MultiSelect *MultiSelect +} + +// LabelClass - Label class definition +type LabelClass struct { + // Display name of the label class. + DisplayName *string + + // Dictionary of subclasses of the label class. + Subclasses map[string]*LabelClass +} + +// LabelingDataConfiguration - Labeling data configuration definition +type LabelingDataConfiguration struct { + // Resource Id of the data asset to perform labeling. + DataID *string + + // Indicates whether to enable incremental data refresh. + IncrementalDataRefresh *IncrementalDataRefresh +} + +// LabelingJob - Azure Resource Manager resource envelope. +type LabelingJob struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *LabelingJobProperties + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// LabelingJobImageProperties - Properties of a labeling job for image data +type LabelingJobImageProperties struct { + // REQUIRED; [Required] Media type of the job. + MediaType *MediaType + + // Annotation type of image labeling job. + AnnotationType *ImageAnnotationType +} + +// GetLabelingJobMediaProperties implements the LabelingJobMediaPropertiesClassification interface for type LabelingJobImageProperties. 
+func (l *LabelingJobImageProperties) GetLabelingJobMediaProperties() *LabelingJobMediaProperties { + return &LabelingJobMediaProperties{ + MediaType: l.MediaType, + } +} + +// LabelingJobInstructions - Instructions for labeling job +type LabelingJobInstructions struct { + // The link to a page with detailed labeling instructions for labelers. + URI *string +} + +// LabelingJobMediaPropertiesClassification provides polymorphic access to related types. +// Call the interface's GetLabelingJobMediaProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *LabelingJobImageProperties, *LabelingJobMediaProperties, *LabelingJobTextProperties +type LabelingJobMediaPropertiesClassification interface { + // GetLabelingJobMediaProperties returns the LabelingJobMediaProperties content of the underlying type. + GetLabelingJobMediaProperties() *LabelingJobMediaProperties +} + +// LabelingJobMediaProperties - Properties of a labeling job +type LabelingJobMediaProperties struct { + // REQUIRED; [Required] Media type of the job. + MediaType *MediaType +} + +// GetLabelingJobMediaProperties implements the LabelingJobMediaPropertiesClassification interface for type LabelingJobMediaProperties. +func (l *LabelingJobMediaProperties) GetLabelingJobMediaProperties() *LabelingJobMediaProperties { + return l +} + +// LabelingJobProperties - Labeling job definition +type LabelingJobProperties struct { + // REQUIRED; [Required] Specifies the type of job. + JobType *JobType + + // ARM resource ID of the component resource. + ComponentID *string + + // ARM resource ID of the compute resource. + ComputeID *string + + // Configuration of data used in the job. + DataConfiguration *LabelingDataConfiguration + + // The asset description text. + Description *string + + // Display name of job. + DisplayName *string + + // The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment. + ExperimentName *string + + // Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken + // if null. + Identity IdentityConfigurationClassification + + // Is the asset archived? + IsArchived *bool + + // Labeling instructions of the job. + JobInstructions *LabelingJobInstructions + + // Label categories of the job. + LabelCategories map[string]*LabelCategory + + // Media type specific properties in the job. + LabelingJobMediaProperties LabelingJobMediaPropertiesClassification + + // Configuration of MLAssist feature in the job. + MlAssistConfiguration MLAssistConfigurationClassification + + // Notification setting for the job + NotificationSetting *NotificationSetting + + // The asset property dictionary. + Properties map[string]*string + + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration + + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. + Services map[string]*JobService + + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string + + // READ-ONLY; Created time of the job in UTC timezone. + CreatedDateTime *time.Time + + // READ-ONLY; Progress metrics of the job. + ProgressMetrics *ProgressMetrics + + // READ-ONLY; Internal id of the job(Previously called project). + ProjectID *string + + // READ-ONLY; Specifies the labeling job provisioning state. 
+ ProvisioningState *JobProvisioningState + + // READ-ONLY; Status of the job. + Status *JobStatus + + // READ-ONLY; Status messages of the job. + StatusMessages []*StatusMessage +} + +// GetJobBaseProperties implements the JobBasePropertiesClassification interface for type LabelingJobProperties. +func (l *LabelingJobProperties) GetJobBaseProperties() *JobBaseProperties { + return &JobBaseProperties{ + ComponentID: l.ComponentID, + ComputeID: l.ComputeID, + DisplayName: l.DisplayName, + ExperimentName: l.ExperimentName, + Identity: l.Identity, + IsArchived: l.IsArchived, + JobType: l.JobType, + NotificationSetting: l.NotificationSetting, + SecretsConfiguration: l.SecretsConfiguration, + Services: l.Services, + Status: l.Status, + Description: l.Description, + Properties: l.Properties, + Tags: l.Tags, + } +} + +// LabelingJobResourceArmPaginatedResult - A paginated list of LabelingJob entities. +type LabelingJobResourceArmPaginatedResult struct { + // The link to the next page of LabelingJob objects. If null, there are no additional pages. + NextLink *string + + // An array of objects of type LabelingJob. + Value []*LabelingJob +} + +// LabelingJobTextProperties - Properties of a labeling job for text data +type LabelingJobTextProperties struct { + // REQUIRED; [Required] Media type of the job. + MediaType *MediaType + + // Annotation type of text labeling job. + AnnotationType *TextAnnotationType +} + +// GetLabelingJobMediaProperties implements the LabelingJobMediaPropertiesClassification interface for type LabelingJobTextProperties. +func (l *LabelingJobTextProperties) GetLabelingJobMediaProperties() *LabelingJobMediaProperties { + return &LabelingJobMediaProperties{ + MediaType: l.MediaType, + } +} + +// LabelingJobsClientBeginCreateOrUpdateOptions contains the optional parameters for the LabelingJobsClient.BeginCreateOrUpdate +// method. +type LabelingJobsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LabelingJobsClientBeginExportLabelsOptions contains the optional parameters for the LabelingJobsClient.BeginExportLabels +// method. +type LabelingJobsClientBeginExportLabelsOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LabelingJobsClientBeginResumeOptions contains the optional parameters for the LabelingJobsClient.BeginResume method. +type LabelingJobsClientBeginResumeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LabelingJobsClientDeleteOptions contains the optional parameters for the LabelingJobsClient.Delete method. +type LabelingJobsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// LabelingJobsClientGetOptions contains the optional parameters for the LabelingJobsClient.Get method. +type LabelingJobsClientGetOptions struct { + // Boolean value to indicate whether to include JobInstructions in response. + IncludeJobInstructions *bool + // Boolean value to indicate Whether to include LabelCategories in response. + IncludeLabelCategories *bool +} + +// LabelingJobsClientListOptions contains the optional parameters for the LabelingJobsClient.NewListPager method. +type LabelingJobsClientListOptions struct { + // Continuation token for pagination. + Skip *string + // Number of labeling jobs to return. + Top *int32 +} + +// LabelingJobsClientPauseOptions contains the optional parameters for the LabelingJobsClient.Pause method. 
+type LabelingJobsClientPauseOptions struct { + // placeholder for future optional parameters +} + +type LakeHouseArtifact struct { + // REQUIRED; [Required] OneLake artifact name + ArtifactName *string + + // REQUIRED; [Required] OneLake artifact type + ArtifactType *OneLakeArtifactType +} + +// GetOneLakeArtifact implements the OneLakeArtifactClassification interface for type LakeHouseArtifact. +func (l *LakeHouseArtifact) GetOneLakeArtifact() *OneLakeArtifact { + return &OneLakeArtifact{ + ArtifactName: l.ArtifactName, + ArtifactType: l.ArtifactType, + } +} + +// ListAmlUserFeatureResult - The List Aml user feature operation response. +type ListAmlUserFeatureResult struct { + // READ-ONLY; The URI to fetch the next page of AML user features information. Call ListNext() with this to fetch the next + // page of AML user features information. + NextLink *string + + // READ-ONLY; The list of AML user facing features. + Value []*AmlUserFeature +} + +type ListNotebookKeysResult struct { + // READ-ONLY; The primary access key of the Notebook + PrimaryAccessKey *string + + // READ-ONLY; The secondary access key of the Notebook + SecondaryAccessKey *string +} + +type ListStorageAccountKeysResult struct { + // READ-ONLY; The access key of the storage + UserStorageKey *string +} + +// ListUsagesResult - The List Usages operation response. +type ListUsagesResult struct { + // READ-ONLY; The URI to fetch the next page of AML resource usage information. Call ListNext() with this to fetch the next + // page of AML resource usage information. + NextLink *string + + // READ-ONLY; The list of AML resource usages. + Value []*Usage +} + +type ListWorkspaceKeysResult struct { + ContainerRegistryCredentials *RegistryListCredentialsResult + NotebookAccessKeys *ListNotebookKeysResult + + // READ-ONLY; The access key of the workspace app insights + AppInsightsInstrumentationKey *string + + // READ-ONLY; The arm Id key of the workspace storage + UserStorageArmID *string + + // READ-ONLY; The access key of the workspace storage + UserStorageKey *string +} + +// ListWorkspaceQuotas - The List WorkspaceQuotasByVMFamily operation response. +type ListWorkspaceQuotas struct { + // READ-ONLY; The URI to fetch the next page of workspace quota information by VM Family. Call ListNext() with this to fetch + // the next page of Workspace Quota information. + NextLink *string + + // READ-ONLY; The list of Workspace Quotas by VM Family + Value []*ResourceQuota +} + +// LiteralJobInput - Literal input type. +type LiteralJobInput struct { + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // REQUIRED; [Required] Literal value for the input. + Value *string + + // Description for the input. + Description *string +} + +// GetJobInput implements the JobInputClassification interface for type LiteralJobInput. +func (l *LiteralJobInput) GetJobInput() *JobInput { + return &JobInput{ + Description: l.Description, + JobInputType: l.JobInputType, + } +} + +// MLAssistConfigurationClassification provides polymorphic access to related types. +// Call the interface's GetMLAssistConfiguration() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *MLAssistConfiguration, *MLAssistConfigurationDisabled, *MLAssistConfigurationEnabled +type MLAssistConfigurationClassification interface { + // GetMLAssistConfiguration returns the MLAssistConfiguration content of the underlying type. 
+ GetMLAssistConfiguration() *MLAssistConfiguration +} + +// MLAssistConfiguration - Labeling MLAssist configuration definition +type MLAssistConfiguration struct { + // REQUIRED; [Required] Indicates whether MLAssist feature is enabled. + MlAssist *MLAssistConfigurationType +} + +// GetMLAssistConfiguration implements the MLAssistConfigurationClassification interface for type MLAssistConfiguration. +func (m *MLAssistConfiguration) GetMLAssistConfiguration() *MLAssistConfiguration { return m } + +// MLAssistConfigurationDisabled - Labeling MLAssist configuration definition when MLAssist is disabled +type MLAssistConfigurationDisabled struct { + // REQUIRED; [Required] Indicates whether MLAssist feature is enabled. + MlAssist *MLAssistConfigurationType +} + +// GetMLAssistConfiguration implements the MLAssistConfigurationClassification interface for type MLAssistConfigurationDisabled. +func (m *MLAssistConfigurationDisabled) GetMLAssistConfiguration() *MLAssistConfiguration { + return &MLAssistConfiguration{ + MlAssist: m.MlAssist, + } +} + +// MLAssistConfigurationEnabled - Labeling MLAssist configuration definition when MLAssist is enabled +type MLAssistConfigurationEnabled struct { + // REQUIRED; [Required] AML compute binding used in inferencing. + InferencingComputeBinding *string + + // REQUIRED; [Required] Indicates whether MLAssist feature is enabled. + MlAssist *MLAssistConfigurationType + + // REQUIRED; [Required] AML compute binding used in training. + TrainingComputeBinding *string +} + +// GetMLAssistConfiguration implements the MLAssistConfigurationClassification interface for type MLAssistConfigurationEnabled. +func (m *MLAssistConfigurationEnabled) GetMLAssistConfiguration() *MLAssistConfiguration { + return &MLAssistConfiguration{ + MlAssist: m.MlAssist, + } +} + +type MLFlowModelJobInput struct { + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // REQUIRED; [Required] Input Asset URI. + URI *string + + // Description for the input. + Description *string + + // Input Asset Delivery Mode. + Mode *InputDeliveryMode +} + +// GetJobInput implements the JobInputClassification interface for type MLFlowModelJobInput. +func (m *MLFlowModelJobInput) GetJobInput() *JobInput { + return &JobInput{ + Description: m.Description, + JobInputType: m.JobInputType, + } +} + +type MLFlowModelJobOutput struct { + // REQUIRED; [Required] Specifies the type of job. + JobOutputType *JobOutputType + + // Output Asset Name. + AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + + // Description for the output. + Description *string + + // Output Asset Delivery Mode. + Mode *OutputDeliveryMode + + // Output Asset URI. + URI *string +} + +// GetJobOutput implements the JobOutputClassification interface for type MLFlowModelJobOutput. +func (m *MLFlowModelJobOutput) GetJobOutput() *JobOutput { + return &JobOutput{ + Description: m.Description, + JobOutputType: m.JobOutputType, + } +} + +// MLTableData - MLTable data definition +type MLTableData struct { + // REQUIRED; [Required] Specifies the type of data. + DataType *DataType + + // REQUIRED; [Required] Uri of the data. Example: https://go.microsoft.com/fwlink/?linkid=2202330 + DataURI *string + + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + + // The asset description text. + Description *string + + // Intellectual Property details. 
Used if data is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous + IsAnonymous *bool + + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived + IsArchived *bool + + // The asset property dictionary. + Properties map[string]*string + + // Uris referenced in the MLTable definition (required for lineage) + ReferencedUris []*string + + // Stage in the data lifecycle assigned to this data asset + Stage *string + + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string +} + +// GetDataVersionBaseProperties implements the DataVersionBasePropertiesClassification interface for type MLTableData. +func (m *MLTableData) GetDataVersionBaseProperties() *DataVersionBaseProperties { + return &DataVersionBaseProperties{ + DataType: m.DataType, + DataURI: m.DataURI, + IntellectualProperty: m.IntellectualProperty, + Stage: m.Stage, + AutoDeleteSetting: m.AutoDeleteSetting, + IsAnonymous: m.IsAnonymous, + IsArchived: m.IsArchived, + Description: m.Description, + Properties: m.Properties, + Tags: m.Tags, + } +} + +type MLTableJobInput struct { + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // REQUIRED; [Required] Input Asset URI. + URI *string + + // Description for the input. + Description *string + + // Input Asset Delivery Mode. + Mode *InputDeliveryMode +} + +// GetJobInput implements the JobInputClassification interface for type MLTableJobInput. +func (m *MLTableJobInput) GetJobInput() *JobInput { + return &JobInput{ + Description: m.Description, + JobInputType: m.JobInputType, + } +} + +type MLTableJobOutput struct { + // REQUIRED; [Required] Specifies the type of job. + JobOutputType *JobOutputType + + // Output Asset Name. + AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + + // Description for the output. + Description *string + + // Output Asset Delivery Mode. + Mode *OutputDeliveryMode + + // Output Asset URI. + URI *string +} + +// GetJobOutput implements the JobOutputClassification interface for type MLTableJobOutput. +func (m *MLTableJobOutput) GetJobOutput() *JobOutput { + return &JobOutput{ + Description: m.Description, + JobOutputType: m.JobOutputType, + } +} + +// ManagedComputeIdentity - Managed compute identity definition. +type ManagedComputeIdentity struct { + // REQUIRED; [Required] Monitor compute identity type enum. + ComputeIdentityType *MonitorComputeIdentityType + + // Managed service identity (system assigned and/or user assigned identities) + Identity *ManagedServiceIdentity +} + +// GetMonitorComputeIdentityBase implements the MonitorComputeIdentityBaseClassification interface for type ManagedComputeIdentity. +func (m *ManagedComputeIdentity) GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase { + return &MonitorComputeIdentityBase{ + ComputeIdentityType: m.ComputeIdentityType, + } +} + +// ManagedIdentity - Managed identity configuration. +type ManagedIdentity struct { + // REQUIRED; [Required] Specifies the type of identity framework. + IdentityType *IdentityConfigurationType + + // Specifies a user-assigned identity by client ID. For system-assigned, do not set this field. 
+ ClientID *string + + // Specifies a user-assigned identity by object ID. For system-assigned, do not set this field. + ObjectID *string + + // Specifies a user-assigned identity by ARM resource ID. For system-assigned, do not set this field. + ResourceID *string +} + +// GetIdentityConfiguration implements the IdentityConfigurationClassification interface for type ManagedIdentity. +func (m *ManagedIdentity) GetIdentityConfiguration() *IdentityConfiguration { + return &IdentityConfiguration{ + IdentityType: m.IdentityType, + } +} + +type ManagedIdentityAuthTypeWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType + + // Category of the connection + Category *ConnectionCategory + Credentials *WorkspaceConnectionManagedIdentity + ExpiryTime *time.Time + + // Anything + Metadata any + Target *string +} + +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type ManagedIdentityAuthTypeWorkspaceConnectionProperties. +func (m *ManagedIdentityAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: m.AuthType, + Category: m.Category, + ExpiryTime: m.ExpiryTime, + Metadata: m.Metadata, + Target: m.Target, + } +} + +// ManagedNetworkProvisionOptions - Managed Network Provisioning options for managed network of a machine learning workspace. +type ManagedNetworkProvisionOptions struct { + IncludeSpark *bool +} + +// ManagedNetworkProvisionStatus - Status of the Provisioning for the managed network of a machine learning workspace. +type ManagedNetworkProvisionStatus struct { + SparkReady *bool + + // Status for the managed network of a machine learning workspace. + Status *ManagedNetworkStatus +} + +// ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions contains the optional parameters for the ManagedNetworkProvisionsClient.BeginProvisionManagedNetwork +// method. +type ManagedNetworkProvisionsClientBeginProvisionManagedNetworkOptions struct { + // Managed Network Provisioning Options for a machine learning workspace. + Body *ManagedNetworkProvisionOptions + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ManagedNetworkSettings - Managed Network settings for a machine learning workspace. +type ManagedNetworkSettings struct { + // Isolation mode for the managed network of a machine learning workspace. + IsolationMode *IsolationMode + + // Dictionary of + OutboundRules map[string]OutboundRuleClassification + + // Status of the Provisioning for the managed network of a machine learning workspace. + Status *ManagedNetworkProvisionStatus + + // READ-ONLY + NetworkID *string +} + +// ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.BeginCreateOrUpdate +// method. +type ManagedNetworkSettingsRuleClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ManagedNetworkSettingsRuleClientBeginDeleteOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.BeginDelete +// method. +type ManagedNetworkSettingsRuleClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ManagedNetworkSettingsRuleClientGetOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.Get +// method. 
+type ManagedNetworkSettingsRuleClientGetOptions struct { + // placeholder for future optional parameters +} + +// ManagedNetworkSettingsRuleClientListOptions contains the optional parameters for the ManagedNetworkSettingsRuleClient.NewListPager +// method. +type ManagedNetworkSettingsRuleClientListOptions struct { + // placeholder for future optional parameters +} + +// ManagedOnlineDeployment - Properties specific to a ManagedOnlineDeployment. +type ManagedOnlineDeployment struct { + // REQUIRED; [Required] The compute type of the endpoint. + EndpointComputeType *EndpointComputeType + + // If true, enables Application Insights logging. + AppInsightsEnabled *bool + + // Code configuration for the endpoint deployment. + CodeConfiguration *CodeConfiguration + + // The mdc configuration, we disable mdc when it's null. + DataCollector *DataCollector + + // Description of the endpoint deployment. + Description *string + + // If Enabled, allow egress public network access. If Disabled, this will create secure egress. Default: Enabled. + EgressPublicNetworkAccess *EgressPublicNetworkAccessType + + // ARM resource ID of the environment specification for the endpoint deployment. + EnvironmentID *string + + // Environment variables configuration for the deployment. + EnvironmentVariables map[string]*string + + // Compute instance type. + InstanceType *string + + // Liveness probe monitors the health of the container regularly. + LivenessProbe *ProbeSettings + + // The URI path to the model. + Model *string + + // The path to mount the model in custom container. + ModelMountPath *string + + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string + + // Readiness probe validates if the container is ready to serve traffic. The properties and defaults are the same as liveness + // probe. + ReadinessProbe *ProbeSettings + + // Request settings for the deployment. + RequestSettings *OnlineRequestSettings + + // Scale settings for the deployment. If it is null or not provided, it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment + // and to DefaultScaleSettings for ManagedOnlineDeployment. + ScaleSettings OnlineScaleSettingsClassification + + // READ-ONLY; Provisioning state for the endpoint deployment. + ProvisioningState *DeploymentProvisioningState +} + +// GetOnlineDeploymentProperties implements the OnlineDeploymentPropertiesClassification interface for type ManagedOnlineDeployment. +func (m *ManagedOnlineDeployment) GetOnlineDeploymentProperties() *OnlineDeploymentProperties { + return &OnlineDeploymentProperties{ + AppInsightsEnabled: m.AppInsightsEnabled, + DataCollector: m.DataCollector, + EgressPublicNetworkAccess: m.EgressPublicNetworkAccess, + EndpointComputeType: m.EndpointComputeType, + InstanceType: m.InstanceType, + LivenessProbe: m.LivenessProbe, + Model: m.Model, + ModelMountPath: m.ModelMountPath, + ProvisioningState: m.ProvisioningState, + ReadinessProbe: m.ReadinessProbe, + RequestSettings: m.RequestSettings, + ScaleSettings: m.ScaleSettings, + CodeConfiguration: m.CodeConfiguration, + Description: m.Description, + EnvironmentID: m.EnvironmentID, + EnvironmentVariables: m.EnvironmentVariables, + Properties: m.Properties, + } +} + +// ManagedServiceIdentity - Managed service identity (system assigned and/or user assigned identities) +type ManagedServiceIdentity struct { + // REQUIRED; Type of managed service identity (where both SystemAssigned and UserAssigned types are allowed). 
+ Type *ManagedServiceIdentityType + + // The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM + // resource ids in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. + // The dictionary values can be empty objects ({}) in + // requests. + UserAssignedIdentities map[string]*UserAssignedIdentity + + // READ-ONLY; The service principal ID of the system assigned identity. This property will only be provided for a system assigned + // identity. + PrincipalID *string + + // READ-ONLY; The tenant ID of the system assigned identity. This property will only be provided for a system assigned identity. + TenantID *string +} + +// MaterializationComputeResource - Dto object representing compute resource +type MaterializationComputeResource struct { + // Specifies the instance type + InstanceType *string +} + +type MaterializationSettings struct { + // Specifies the notification details + Notification *NotificationSetting + + // Specifies the compute resource settings + Resource *MaterializationComputeResource + + // Specifies the schedule details + Schedule *RecurrenceTrigger + + // Specifies the spark compute settings + SparkConfiguration map[string]*string + + // Specifies the stores to which materialization should happen + StoreType *MaterializationStoreType +} + +// MedianStoppingPolicy - Defines an early termination policy based on running averages of the primary metric of all runs +type MedianStoppingPolicy struct { + // REQUIRED; [Required] Name of policy configuration + PolicyType *EarlyTerminationPolicyType + + // Number of intervals by which to delay the first evaluation. + DelayEvaluation *int32 + + // Interval (number of runs) between policy evaluations. + EvaluationInterval *int32 +} + +// GetEarlyTerminationPolicy implements the EarlyTerminationPolicyClassification interface for type MedianStoppingPolicy. +func (m *MedianStoppingPolicy) GetEarlyTerminationPolicy() *EarlyTerminationPolicy { + return &EarlyTerminationPolicy{ + DelayEvaluation: m.DelayEvaluation, + EvaluationInterval: m.EvaluationInterval, + PolicyType: m.PolicyType, + } +} + +// ModelConfiguration - Model configuration options. +type ModelConfiguration struct { + // Input delivery mode for the model. + Mode *PackageInputDeliveryMode + + // Relative mounting path of the model in the target image. + MountPath *string +} + +// ModelContainer - Azure Resource Manager resource envelope. +type ModelContainer struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *ModelContainerProperties + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +type ModelContainerProperties struct { + // The asset description text. + Description *string + + // Is the asset archived? + IsArchived *bool + + // The asset property dictionary. + Properties map[string]*string + + // Tag dictionary. Tags can be added, removed, and updated. 
+ Tags map[string]*string + + // READ-ONLY; The latest version inside this container. + LatestVersion *string + + // READ-ONLY; The next auto incremental version + NextVersion *string + + // READ-ONLY; Provisioning state for the model container. + ProvisioningState *AssetProvisioningState +} + +// ModelContainerResourceArmPaginatedResult - A paginated list of ModelContainer entities. +type ModelContainerResourceArmPaginatedResult struct { + // The link to the next page of ModelContainer objects. If null, there are no additional pages. + NextLink *string + + // An array of objects of type ModelContainer. + Value []*ModelContainer +} + +// ModelContainersClientCreateOrUpdateOptions contains the optional parameters for the ModelContainersClient.CreateOrUpdate +// method. +type ModelContainersClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// ModelContainersClientDeleteOptions contains the optional parameters for the ModelContainersClient.Delete method. +type ModelContainersClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// ModelContainersClientGetOptions contains the optional parameters for the ModelContainersClient.Get method. +type ModelContainersClientGetOptions struct { + // placeholder for future optional parameters +} + +// ModelContainersClientListOptions contains the optional parameters for the ModelContainersClient.NewListPager method. +type ModelContainersClientListOptions struct { + // Maximum number of results to return. + Count *int32 + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Continuation token for pagination. + Skip *string +} + +// ModelPackageInput - Model package input options. +type ModelPackageInput struct { + // REQUIRED; [Required] Type of the input included in the target image. + InputType *PackageInputType + + // REQUIRED; [Required] Location of the input. + Path PackageInputPathBaseClassification + + // Input delivery mode of the input. + Mode *PackageInputDeliveryMode + + // Relative mount path of the input in the target image. + MountPath *string +} + +// ModelPerformanceMetricThresholdBaseClassification provides polymorphic access to related types. +// Call the interface's GetModelPerformanceMetricThresholdBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ClassificationModelPerformanceMetricThreshold, *ModelPerformanceMetricThresholdBase, *RegressionModelPerformanceMetricThreshold +type ModelPerformanceMetricThresholdBaseClassification interface { + // GetModelPerformanceMetricThresholdBase returns the ModelPerformanceMetricThresholdBase content of the underlying type. + GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase +} + +type ModelPerformanceMetricThresholdBase struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + ModelType *MonitoringModelType + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetModelPerformanceMetricThresholdBase implements the ModelPerformanceMetricThresholdBaseClassification interface for type +// ModelPerformanceMetricThresholdBase. +func (m *ModelPerformanceMetricThresholdBase) GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase { + return m +} + +// ModelPerformanceSignal - Model performance signal definition. 
+type ModelPerformanceSignal struct { + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + MetricThreshold ModelPerformanceMetricThresholdBaseClassification + + // REQUIRED; [Required] The data produced by the production service which drift will be calculated for. + ProductionData []MonitoringInputDataBaseClassification + + // REQUIRED; [Required] The data to calculate drift against. + ReferenceData MonitoringInputDataBaseClassification + + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType + + // The data segment. + DataSegment *MonitoringDataSegment + + // The current notification mode for this signal. + Mode *MonitoringNotificationMode + + // Property dictionary. Properties can be added, but not removed or altered. + Properties map[string]*string +} + +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type ModelPerformanceSignal. +func (m *ModelPerformanceSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: m.Mode, + Properties: m.Properties, + SignalType: m.SignalType, + } +} + +// ModelVersion - Azure Resource Manager resource envelope. +type ModelVersion struct { + // REQUIRED; [Required] Additional attributes of the entity. + Properties *ModelVersionProperties + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// ModelVersionProperties - Model asset version details. +type ModelVersionProperties struct { + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + + // The asset description text. + Description *string + + // Mapping of model flavors to their properties. + Flavors map[string]*FlavorData + + // Intellectual Property details. Used if model is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous + IsAnonymous *bool + + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived + IsArchived *bool + + // Name of the training job which produced this model + JobName *string + + // The storage format for this entity. Used for NCD. + ModelType *string + + // The URI path to the model contents. + ModelURI *string + + // The asset property dictionary. + Properties map[string]*string + + // Stage in the model lifecycle assigned to this model + Stage *string + + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string + + // READ-ONLY; Provisioning state for the model version. + ProvisioningState *AssetProvisioningState +} + +// ModelVersionResourceArmPaginatedResult - A paginated list of ModelVersion entities. +type ModelVersionResourceArmPaginatedResult struct { + // The link to the next page of ModelVersion objects. If null, there are no additional pages. 
+ NextLink *string + + // An array of objects of type ModelVersion. + Value []*ModelVersion +} + +// ModelVersionsClientBeginPackageOptions contains the optional parameters for the ModelVersionsClient.BeginPackage method. +type ModelVersionsClientBeginPackageOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ModelVersionsClientCreateOrUpdateOptions contains the optional parameters for the ModelVersionsClient.CreateOrUpdate method. +type ModelVersionsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// ModelVersionsClientDeleteOptions contains the optional parameters for the ModelVersionsClient.Delete method. +type ModelVersionsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// ModelVersionsClientGetOptions contains the optional parameters for the ModelVersionsClient.Get method. +type ModelVersionsClientGetOptions struct { + // placeholder for future optional parameters +} + +// ModelVersionsClientListOptions contains the optional parameters for the ModelVersionsClient.NewListPager method. +type ModelVersionsClientListOptions struct { + // Model description. + Description *string + // Name of the feed. + Feed *string + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Number of initial results to skip. + Offset *int32 + // Ordering of list. + OrderBy *string + // Comma-separated list of property names (and optionally values). Example: prop1,prop2=value2 + Properties *string + // Continuation token for pagination. + Skip *string + // Model stage + Stage *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string + // Maximum number of records to return. + Top *int32 + // Model version. + Version *string +} + +// MonitorComputeConfigurationBaseClassification provides polymorphic access to related types. +// Call the interface's GetMonitorComputeConfigurationBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *MonitorComputeConfigurationBase, *MonitorServerlessSparkCompute +type MonitorComputeConfigurationBaseClassification interface { + // GetMonitorComputeConfigurationBase returns the MonitorComputeConfigurationBase content of the underlying type. + GetMonitorComputeConfigurationBase() *MonitorComputeConfigurationBase +} + +// MonitorComputeConfigurationBase - Monitor compute configuration base definition. +type MonitorComputeConfigurationBase struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + ComputeType *MonitorComputeType +} + +// GetMonitorComputeConfigurationBase implements the MonitorComputeConfigurationBaseClassification interface for type MonitorComputeConfigurationBase. +func (m *MonitorComputeConfigurationBase) GetMonitorComputeConfigurationBase() *MonitorComputeConfigurationBase { + return m +} + +// MonitorComputeIdentityBaseClassification provides polymorphic access to related types. +// Call the interface's GetMonitorComputeIdentityBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AmlTokenComputeIdentity, *ManagedComputeIdentity, *MonitorComputeIdentityBase +type MonitorComputeIdentityBaseClassification interface { + // GetMonitorComputeIdentityBase returns the MonitorComputeIdentityBase content of the underlying type. 
+ GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase
+}
+
+// MonitorComputeIdentityBase - Monitor compute identity base definition.
+type MonitorComputeIdentityBase struct {
+ // REQUIRED; [Required] Monitor compute identity type enum.
+ ComputeIdentityType *MonitorComputeIdentityType
+}
+
+// GetMonitorComputeIdentityBase implements the MonitorComputeIdentityBaseClassification interface for type MonitorComputeIdentityBase.
+func (m *MonitorComputeIdentityBase) GetMonitorComputeIdentityBase() *MonitorComputeIdentityBase {
+ return m
+}
+
+type MonitorDefinition struct {
+ // REQUIRED; [Required] The ARM resource ID of the compute resource to run the monitoring job on.
+ ComputeConfiguration MonitorComputeConfigurationBaseClassification
+
+ // REQUIRED; [Required] The signals to monitor.
+ Signals map[string]MonitoringSignalBaseClassification
+
+ // The monitor's notification settings.
+ AlertNotificationSetting MonitoringAlertNotificationSettingsBaseClassification
+
+ // The ARM resource ID of either the model or deployment targeted by this monitor.
+ MonitoringTarget *MonitoringTarget
+}
+
+// MonitorServerlessSparkCompute - Monitor serverless Spark compute definition.
+type MonitorServerlessSparkCompute struct {
+ // REQUIRED; [Required] The identity scheme used by the Spark jobs running on serverless Spark.
+ ComputeIdentity MonitorComputeIdentityBaseClassification
+
+ // REQUIRED; [Required] Specifies the type of compute.
+ ComputeType *MonitorComputeType
+
+ // REQUIRED; [Required] The instance type running the Spark job.
+ InstanceType *string
+
+ // REQUIRED; [Required] The Spark runtime version.
+ RuntimeVersion *string
+}
+
+// GetMonitorComputeConfigurationBase implements the MonitorComputeConfigurationBaseClassification interface for type MonitorServerlessSparkCompute.
+func (m *MonitorServerlessSparkCompute) GetMonitorComputeConfigurationBase() *MonitorComputeConfigurationBase {
+ return &MonitorComputeConfigurationBase{
+ ComputeType: m.ComputeType,
+ }
+}
+
+// MonitoringAlertNotificationSettingsBaseClassification provides polymorphic access to related types.
+// Call the interface's GetMonitoringAlertNotificationSettingsBase() method to access the common type.
+// Use a type switch to determine the concrete type. The possible types are:
+// - *AzMonMonitoringAlertNotificationSettings, *EmailMonitoringAlertNotificationSettings, *MonitoringAlertNotificationSettingsBase
+type MonitoringAlertNotificationSettingsBaseClassification interface {
+ // GetMonitoringAlertNotificationSettingsBase returns the MonitoringAlertNotificationSettingsBase content of the underlying type.
+ GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase
+}
+
+type MonitoringAlertNotificationSettingsBase struct {
+ // REQUIRED; [Required] Specifies the type of alert notification.
+ AlertNotificationType *MonitoringAlertNotificationType
+}
+
+// GetMonitoringAlertNotificationSettingsBase implements the MonitoringAlertNotificationSettingsBaseClassification interface
+// for type MonitoringAlertNotificationSettingsBase.
+func (m *MonitoringAlertNotificationSettingsBase) GetMonitoringAlertNotificationSettingsBase() *MonitoringAlertNotificationSettingsBase {
+ return m
+}
+
+type MonitoringDataSegment struct {
+ // The feature to segment the data on.
+ Feature *string
+
+ // Filters for only the specified values of the given segmented feature.
+ Values []*string +} + +// MonitoringFeatureFilterBaseClassification provides polymorphic access to related types. +// Call the interface's GetMonitoringFeatureFilterBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AllFeatures, *FeatureSubset, *MonitoringFeatureFilterBase, *TopNFeaturesByAttribution +type MonitoringFeatureFilterBaseClassification interface { + // GetMonitoringFeatureFilterBase returns the MonitoringFeatureFilterBase content of the underlying type. + GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase +} + +type MonitoringFeatureFilterBase struct { + // REQUIRED; [Required] Specifies the feature filter to leverage when selecting features to calculate metrics over. + FilterType *MonitoringFeatureFilterType +} + +// GetMonitoringFeatureFilterBase implements the MonitoringFeatureFilterBaseClassification interface for type MonitoringFeatureFilterBase. +func (m *MonitoringFeatureFilterBase) GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase { + return m +} + +// MonitoringInputDataBaseClassification provides polymorphic access to related types. +// Call the interface's GetMonitoringInputDataBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *FixedInputData, *MonitoringInputDataBase, *StaticInputData, *TrailingInputData +type MonitoringInputDataBaseClassification interface { + // GetMonitoringInputDataBase returns the MonitoringInputDataBase content of the underlying type. + GetMonitoringInputDataBase() *MonitoringInputDataBase +} + +// MonitoringInputDataBase - Monitoring input data base definition. +type MonitoringInputDataBase struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + InputDataType *MonitoringInputDataType + + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // REQUIRED; [Required] Input Asset URI. + URI *string + + // Mapping of column names to special uses. + Columns map[string]*string + + // The context metadata of the data source. + DataContext *string +} + +// GetMonitoringInputDataBase implements the MonitoringInputDataBaseClassification interface for type MonitoringInputDataBase. +func (m *MonitoringInputDataBase) GetMonitoringInputDataBase() *MonitoringInputDataBase { return m } + +// MonitoringSignalBaseClassification provides polymorphic access to related types. +// Call the interface's GetMonitoringSignalBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CustomMonitoringSignal, *DataDriftMonitoringSignal, *DataQualityMonitoringSignal, *FeatureAttributionDriftMonitoringSignal, +// - *GenerationSafetyQualityMonitoringSignal, *GenerationTokenStatisticsSignal, *ModelPerformanceSignal, *MonitoringSignalBase, +// - *PredictionDriftMonitoringSignal +type MonitoringSignalBaseClassification interface { + // GetMonitoringSignalBase returns the MonitoringSignalBase content of the underlying type. + GetMonitoringSignalBase() *MonitoringSignalBase +} + +type MonitoringSignalBase struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType + + // The current notification mode for this signal. + Mode *MonitoringNotificationMode + + // Property dictionary. Properties can be added, but not removed or altered. 
+ Properties map[string]*string
+}
+
+// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type MonitoringSignalBase.
+func (m *MonitoringSignalBase) GetMonitoringSignalBase() *MonitoringSignalBase { return m }
+
+// MonitoringTarget - Monitoring target definition.
+type MonitoringTarget struct {
+ // REQUIRED; [Required] The machine learning task type of the model.
+ TaskType *ModelTaskType
+
+ // The ARM resource ID of the deployment targeted by this monitor.
+ DeploymentID *string
+
+ // The ARM resource ID of the model targeted by this monitor.
+ ModelID *string
+}
+
+type MonitoringThreshold struct {
+ // The threshold value. If null, a default value will be set depending on the metric type.
+ Value *float64
+}
+
+// MonitoringWorkspaceConnection - Monitoring workspace connection definition.
+type MonitoringWorkspaceConnection struct {
+ // The properties of a workspace service connection to store as environment variables in the submitted jobs. Key is workspace
+ // connection property path, name is environment variable key.
+ EnvironmentVariables map[string]*string
+
+ // The properties of a workspace service connection to store as secrets in the submitted jobs. Key is workspace connection
+ // property path, name is secret key.
+ Secrets map[string]*string
+}
+
+// Mpi - MPI distribution configuration.
+type Mpi struct {
+ // REQUIRED; [Required] Specifies the type of distribution framework.
+ DistributionType *DistributionType
+
+ // Number of processes per MPI node.
+ ProcessCountPerInstance *int32
+}
+
+// GetDistributionConfiguration implements the DistributionConfigurationClassification interface for type Mpi.
+func (m *Mpi) GetDistributionConfiguration() *DistributionConfiguration {
+ return &DistributionConfiguration{
+ DistributionType: m.DistributionType,
+ }
+}
+
+// NCrossValidationsClassification provides polymorphic access to related types.
+// Call the interface's GetNCrossValidations() method to access the common type.
+// Use a type switch to determine the concrete type. The possible types are:
+// - *AutoNCrossValidations, *CustomNCrossValidations, *NCrossValidations
+type NCrossValidationsClassification interface {
+ // GetNCrossValidations returns the NCrossValidations content of the underlying type.
+ GetNCrossValidations() *NCrossValidations
+}
+
+// NCrossValidations - N-Cross validations value.
+type NCrossValidations struct {
+ // REQUIRED; [Required] Mode for determining N-Cross validations.
+ Mode *NCrossValidationsMode
+}
+
+// GetNCrossValidations implements the NCrossValidationsClassification interface for type NCrossValidations.
+func (n *NCrossValidations) GetNCrossValidations() *NCrossValidations { return n }
+
+// NlpFixedParameters - Fixed training parameters that won't be swept over during AutoML NLP training.
+type NlpFixedParameters struct {
+ // Number of steps to accumulate gradients over before running a backward pass.
+ GradientAccumulationSteps *int32
+
+ // The learning rate for the training procedure.
+ LearningRate *float32
+
+ // The type of learning rate schedule to use during the training procedure.
+ LearningRateScheduler *NlpLearningRateScheduler
+
+ // The name of the model to train.
+ ModelName *string
+
+ // Number of training epochs.
+ NumberOfEpochs *int32
+
+ // The batch size for the training procedure.
+ TrainingBatchSize *int32
+
+ // The batch size to be used during evaluation.
+ ValidationBatchSize *int32
+
+ // The warmup ratio, used alongside LrSchedulerType.
+ WarmupRatio *float32 + + // The weight decay for the training procedure. + WeightDecay *float32 +} + +// NlpParameterSubspace - Stringified search spaces for each parameter. See below examples. +type NlpParameterSubspace struct { + // Number of steps to accumulate gradients over before running a backward pass. + GradientAccumulationSteps *string + + // The learning rate for the training procedure. + LearningRate *string + + // The type of learning rate schedule to use during the training procedure. + LearningRateScheduler *string + + // The name of the model to train. + ModelName *string + + // Number of training epochs. + NumberOfEpochs *string + + // The batch size for the training procedure. + TrainingBatchSize *string + + // The batch size to be used during evaluation. + ValidationBatchSize *string + + // The warmup ratio, used alongside LrSchedulerType. + WarmupRatio *string + + // The weight decay for the training procedure. + WeightDecay *string +} + +// NlpSweepSettings - Model sweeping and hyperparameter tuning related settings. +type NlpSweepSettings struct { + // REQUIRED; [Required] Type of sampling algorithm. + SamplingAlgorithm *SamplingAlgorithmType + + // Type of early termination policy for the sweeping job. + EarlyTermination EarlyTerminationPolicyClassification +} + +// NlpVertical - Abstract class for NLP related AutoML tasks. NLP - Natural Language Processing. +type NlpVertical struct { + // Featurization inputs needed for AutoML job. + FeaturizationSettings *NlpVerticalFeaturizationSettings + + // Model/training parameters that will remain constant throughout training. + FixedParameters *NlpFixedParameters + + // Execution constraints for AutoMLJob. + LimitSettings *NlpVerticalLimitSettings + + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*NlpParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *NlpSweepSettings + + // Validation data inputs. + ValidationData *MLTableJobInput +} + +type NlpVerticalFeaturizationSettings struct { + // Dataset language, useful for the text data. + DatasetLanguage *string +} + +// NlpVerticalLimitSettings - Job execution constraints. +type NlpVerticalLimitSettings struct { + // Maximum Concurrent AutoML iterations. + MaxConcurrentTrials *int32 + + // Maximum nodes to use for the experiment. + MaxNodes *int32 + + // Number of AutoML iterations. + MaxTrials *int32 + + // AutoML job timeout. + Timeout *string + + // Timeout for individual HD trials. + TrialTimeout *string +} + +// NodeStateCounts - Counts of various compute node states on the amlCompute. +type NodeStateCounts struct { + // READ-ONLY; Number of compute nodes in idle state. + IdleNodeCount *int32 + + // READ-ONLY; Number of compute nodes which are leaving the amlCompute. + LeavingNodeCount *int32 + + // READ-ONLY; Number of compute nodes which are in preempted state. + PreemptedNodeCount *int32 + + // READ-ONLY; Number of compute nodes which are being prepared. + PreparingNodeCount *int32 + + // READ-ONLY; Number of compute nodes which are running jobs. + RunningNodeCount *int32 + + // READ-ONLY; Number of compute nodes which are in unusable state. + UnusableNodeCount *int32 +} + +// NodesClassification provides polymorphic access to related types. +// Call the interface's GetNodes() method to access the common type. // Use a type switch to determine the concrete type. 
The possible types are: -// - *CustomModelJobInput, *JobInput, *LiteralJobInput, *MLFlowModelJobInput, *MLTableJobInput, *TritonModelJobInput, *URIFileJobInput, -// - *URIFolderJobInput -type JobInputClassification interface { - // GetJobInput returns the JobInput content of the underlying type. - GetJobInput() *JobInput +// - *AllNodes, *Nodes +type NodesClassification interface { + // GetNodes returns the Nodes content of the underlying type. + GetNodes() *Nodes } -// JobInput - Command job definition. -type JobInput struct { - // REQUIRED; [Required] Specifies the type of job. - JobInputType *JobInputType - - // Description for the input. - Description *string +// Nodes - Abstract Nodes definition +type Nodes struct { + // REQUIRED; [Required] Type of the Nodes value + NodesValueType *NodesValueType } -// GetJobInput implements the JobInputClassification interface for type JobInput. -func (j *JobInput) GetJobInput() *JobInput { return j } +// GetNodes implements the NodesClassification interface for type Nodes. +func (n *Nodes) GetNodes() *Nodes { return n } -// JobLimitsClassification provides polymorphic access to related types. -// Call the interface's GetJobLimits() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *CommandJobLimits, *JobLimits, *SweepJobLimits -type JobLimitsClassification interface { - // GetJobLimits returns the JobLimits content of the underlying type. - GetJobLimits() *JobLimits -} +type NoneAuthTypeWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType -type JobLimits struct { - // REQUIRED; [Required] JobLimit type. - JobLimitsType *JobLimitsType + // Category of the connection + Category *ConnectionCategory + ExpiryTime *time.Time - // The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as - // low as Seconds. - Timeout *string + // Anything + Metadata any + Target *string } -// GetJobLimits implements the JobLimitsClassification interface for type JobLimits. -func (j *JobLimits) GetJobLimits() *JobLimits { return j } +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type NoneAuthTypeWorkspaceConnectionProperties. +func (n *NoneAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: n.AuthType, + Category: n.Category, + ExpiryTime: n.ExpiryTime, + Metadata: n.Metadata, + Target: n.Target, + } +} -// JobOutputClassification provides polymorphic access to related types. -// Call the interface's GetJobOutput() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *CustomModelJobOutput, *JobOutput, *MLFlowModelJobOutput, *MLTableJobOutput, *TritonModelJobOutput, *URIFileJobOutput, -// - *URIFolderJobOutput -type JobOutputClassification interface { - // GetJobOutput returns the JobOutput content of the underlying type. - GetJobOutput() *JobOutput +// NoneDatastoreCredentials - Empty/none datastore credentials. +type NoneDatastoreCredentials struct { + // REQUIRED; [Required] Credential type used to authentication with storage. + CredentialsType *CredentialsType } -// JobOutput - Job output definition container information on where to find job output/logs. 
-type JobOutput struct { - // REQUIRED; [Required] Specifies the type of job. - JobOutputType *JobOutputType +// GetDatastoreCredentials implements the DatastoreCredentialsClassification interface for type NoneDatastoreCredentials. +func (n *NoneDatastoreCredentials) GetDatastoreCredentials() *DatastoreCredentials { + return &DatastoreCredentials{ + CredentialsType: n.CredentialsType, + } +} - // Description for the output. - Description *string +type NotebookAccessTokenResult struct { + // READ-ONLY + AccessToken *string + + // READ-ONLY + ExpiresIn *int32 + + // READ-ONLY + HostName *string + + // READ-ONLY + NotebookResourceID *string + + // READ-ONLY + PublicDNS *string + + // READ-ONLY + RefreshToken *string + + // READ-ONLY + Scope *string + + // READ-ONLY + TokenType *string } -// GetJobOutput implements the JobOutputClassification interface for type JobOutput. -func (j *JobOutput) GetJobOutput() *JobOutput { return j } +type NotebookPreparationError struct { + ErrorMessage *string + StatusCode *int32 +} -type JobResourceConfiguration struct { - // Extra arguments to pass to the Docker run command. This would override any parameters that have already been set by the - // system, or in this section. This parameter is only supported for Azure ML - // compute types. - DockerArgs *string +type NotebookResourceInfo struct { + Fqdn *string + IsPrivateLinkEnabled *bool - // Optional number of instances or nodes used by the compute target. - InstanceCount *int32 + // The error that occurs when preparing notebook. + NotebookPreparationError *NotebookPreparationError - // Optional type of VM used as supported by the compute target. - InstanceType *string + // the data plane resourceId that used to initialize notebook component + ResourceID *string +} - // Additional properties bag. - Properties map[string]any +// NotificationSetting - Configuration for notification. +type NotificationSetting struct { + // Send email notification to user on specified notification type + EmailOn []*EmailNotificationEnableType - // Size of the docker container's shared memory block. This should be in the format of (number)(unit) where number as to be - // greater than 0 and the unit can be one of b(bytes), k(kilobytes), m(megabytes), - // or g(gigabytes). - ShmSize *string + // This is the email recipient list which has a limitation of 499 characters in total concat with comma separator + Emails []*string + + // Send webhook callback to a service. Key is a user-provided name for the webhook. + Webhooks map[string]WebhookClassification } -type JobScheduleAction struct { - // REQUIRED; [Required] Specifies the action type of the schedule - ActionType *ScheduleActionType +type NumericalDataDriftMetricThreshold struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType - // REQUIRED; [Required] Defines Schedule action definition details. - JobDefinition JobBasePropertiesClassification + // REQUIRED; [Required] The numerical data drift metric to calculate. + Metric *NumericalDataDriftMetric + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold } -// GetScheduleActionBase implements the ScheduleActionBaseClassification interface for type JobScheduleAction. 
-func (j *JobScheduleAction) GetScheduleActionBase() *ScheduleActionBase { - return &ScheduleActionBase{ - ActionType: j.ActionType, +// GetDataDriftMetricThresholdBase implements the DataDriftMetricThresholdBaseClassification interface for type NumericalDataDriftMetricThreshold. +func (n *NumericalDataDriftMetricThreshold) GetDataDriftMetricThresholdBase() *DataDriftMetricThresholdBase { + return &DataDriftMetricThresholdBase{ + DataType: n.DataType, + Threshold: n.Threshold, } } -// JobService - Job endpoint definition -type JobService struct { - // Url for endpoint. - Endpoint *string +type NumericalDataQualityMetricThreshold struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType - // Endpoint type. - JobServiceType *string + // REQUIRED; [Required] The numerical data quality metric to calculate. + Metric *NumericalDataQualityMetric - // Port for endpoint. - Port *int32 + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} - // Additional properties to set on the endpoint. - Properties map[string]*string +// GetDataQualityMetricThresholdBase implements the DataQualityMetricThresholdBaseClassification interface for type NumericalDataQualityMetricThreshold. +func (n *NumericalDataQualityMetricThreshold) GetDataQualityMetricThresholdBase() *DataQualityMetricThresholdBase { + return &DataQualityMetricThresholdBase{ + DataType: n.DataType, + Threshold: n.Threshold, + } +} - // READ-ONLY; Any error in the service. - ErrorMessage *string +type NumericalPredictionDriftMetricThreshold struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType - // READ-ONLY; Status of endpoint. - Status *string -} + // REQUIRED; [Required] The numerical prediction drift metric to calculate. + Metric *NumericalPredictionDriftMetric -// JobsClientBeginCancelOptions contains the optional parameters for the JobsClient.BeginCancel method. -type JobsClientBeginCancelOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold } -// JobsClientBeginDeleteOptions contains the optional parameters for the JobsClient.BeginDelete method. -type JobsClientBeginDeleteOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string +// GetPredictionDriftMetricThresholdBase implements the PredictionDriftMetricThresholdBaseClassification interface for type +// NumericalPredictionDriftMetricThreshold. +func (n *NumericalPredictionDriftMetricThreshold) GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase { + return &PredictionDriftMetricThresholdBase{ + DataType: n.DataType, + Threshold: n.Threshold, + } } -// JobsClientCreateOrUpdateOptions contains the optional parameters for the JobsClient.CreateOrUpdate method. -type JobsClientCreateOrUpdateOptions struct { - // placeholder for future optional parameters +// Objective - Optimization objective. +type Objective struct { + // REQUIRED; [Required] Defines supported metric goals for hyperparameter tuning + Goal *Goal + + // REQUIRED; [Required] Name of the metric to optimize. + PrimaryMetric *string } -// JobsClientGetOptions contains the optional parameters for the JobsClient.Get method. 
-type JobsClientGetOptions struct { - // placeholder for future optional parameters +// OneLakeArtifactClassification provides polymorphic access to related types. +// Call the interface's GetOneLakeArtifact() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *LakeHouseArtifact, *OneLakeArtifact +type OneLakeArtifactClassification interface { + // GetOneLakeArtifact returns the OneLakeArtifact content of the underlying type. + GetOneLakeArtifact() *OneLakeArtifact } -// JobsClientListOptions contains the optional parameters for the JobsClient.NewListPager method. -type JobsClientListOptions struct { - // Type of job to be returned. - JobType *string - // View type for including/excluding (for example) archived entities. - ListViewType *ListViewType - // Continuation token for pagination. - Skip *string - // Jobs returned will have this tag key. - Tag *string +// OneLakeArtifact - OneLake artifact (data source) configuration. +type OneLakeArtifact struct { + // REQUIRED; [Required] OneLake artifact name + ArtifactName *string + + // REQUIRED; [Required] OneLake artifact type + ArtifactType *OneLakeArtifactType } -// Kubernetes - A Machine Learning compute based on Kubernetes Compute. -type Kubernetes struct { - // REQUIRED; The type of compute - ComputeType *ComputeType +// GetOneLakeArtifact implements the OneLakeArtifactClassification interface for type OneLakeArtifact. +func (o *OneLakeArtifact) GetOneLakeArtifact() *OneLakeArtifact { return o } - // Location for the underlying compute - ComputeLocation *string +// OneLakeDatastore - OneLake (Trident) datastore configuration. +type OneLakeDatastore struct { + // REQUIRED; [Required] OneLake artifact backing the datastore. + Artifact OneLakeArtifactClassification - // The description of the Machine Learning compute. + // REQUIRED; [Required] Account credentials. + Credentials DatastoreCredentialsClassification + + // REQUIRED; [Required] Storage type backing the datastore. + DatastoreType *DatastoreType + + // REQUIRED; [Required] OneLake workspace name. + OneLakeWorkspaceName *string + + // The asset description text. Description *string - // Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication. - DisableLocalAuth *bool + // OneLake endpoint to use for the datastore. + Endpoint *string - // Properties of Kubernetes - Properties *KubernetesProperties + // Intellectual Property details. + IntellectualProperty *IntellectualProperty - // ARM resource id of the underlying compute - ResourceID *string + // The asset property dictionary. + Properties map[string]*string - // READ-ONLY; The time at which the compute was created. - CreatedOn *time.Time + // Indicates which identity to use to authenticate service data access to customer's storage. + ServiceDataAccessAuthIdentity *ServiceDataAccessAuthIdentity - // READ-ONLY; Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning - // service provisioned it if false. - IsAttachedCompute *bool + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string - // READ-ONLY; The time at which the compute was last modified. - ModifiedOn *time.Time + // READ-ONLY; Readonly property to indicate if datastore is the workspace default datastore + IsDefault *bool +} + +// GetDatastoreProperties implements the DatastorePropertiesClassification interface for type OneLakeDatastore. 
+func (o *OneLakeDatastore) GetDatastoreProperties() *DatastoreProperties { + return &DatastoreProperties{ + Credentials: o.Credentials, + DatastoreType: o.DatastoreType, + IntellectualProperty: o.IntellectualProperty, + IsDefault: o.IsDefault, + Description: o.Description, + Properties: o.Properties, + Tags: o.Tags, + } +} + +type OnlineDeployment struct { + // REQUIRED; The geo-location where the resource lives + Location *string + + // REQUIRED; [Required] Additional attributes of the entity. + Properties OnlineDeploymentPropertiesClassification + + // Managed service identity (system assigned and/or user assigned identities) + Identity *ManagedServiceIdentity + + // Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type. + Kind *string + + // Sku details required for ARM contract for Autoscaling. + SKU *SKU + + // Resource tags. + Tags map[string]*string + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string - // READ-ONLY; Errors during provisioning - ProvisioningErrors []*ErrorResponse + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData - // READ-ONLY; The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed. - ProvisioningState *ProvisioningState + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string } -// GetCompute implements the ComputeClassification interface for type Kubernetes. -func (k *Kubernetes) GetCompute() *Compute { - return &Compute{ - ComputeType: k.ComputeType, - ComputeLocation: k.ComputeLocation, - ProvisioningState: k.ProvisioningState, - Description: k.Description, - CreatedOn: k.CreatedOn, - ModifiedOn: k.ModifiedOn, - ResourceID: k.ResourceID, - ProvisioningErrors: k.ProvisioningErrors, - IsAttachedCompute: k.IsAttachedCompute, - DisableLocalAuth: k.DisableLocalAuth, - } +// OnlineDeploymentPropertiesClassification provides polymorphic access to related types. +// Call the interface's GetOnlineDeploymentProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *KubernetesOnlineDeployment, *ManagedOnlineDeployment, *OnlineDeploymentProperties +type OnlineDeploymentPropertiesClassification interface { + // GetOnlineDeploymentProperties returns the OnlineDeploymentProperties content of the underlying type. + GetOnlineDeploymentProperties() *OnlineDeploymentProperties } -// KubernetesOnlineDeployment - Properties specific to a KubernetesOnlineDeployment. -type KubernetesOnlineDeployment struct { +type OnlineDeploymentProperties struct { // REQUIRED; [Required] The compute type of the endpoint. EndpointComputeType *EndpointComputeType @@ -5015,8 +9109,8 @@ type KubernetesOnlineDeployment struct { // Code configuration for the endpoint deployment. CodeConfiguration *CodeConfiguration - // The resource requirements for the container (cpu and memory). - ContainerResourceRequirements *ContainerResourceRequirements + // The model data collection (MDC) configuration; MDC is disabled when it is null. + DataCollector *DataCollector // Description of the endpoint deployment. 
Description *string @@ -5024,7 +9118,7 @@ type KubernetesOnlineDeployment struct { // If Enabled, allow egress public network access. If Disabled, this will create secure egress. Default: Enabled. EgressPublicNetworkAccess *EgressPublicNetworkAccessType - // ARM resource ID or AssetId of the environment specification for the endpoint deployment. + // ARM resource ID of the environment specification for the endpoint deployment. EnvironmentID *string // Environment variables configuration for the deployment. @@ -5060,438 +9154,307 @@ type KubernetesOnlineDeployment struct { ProvisioningState *DeploymentProvisioningState } -// GetOnlineDeploymentProperties implements the OnlineDeploymentPropertiesClassification interface for type KubernetesOnlineDeployment. -func (k *KubernetesOnlineDeployment) GetOnlineDeploymentProperties() *OnlineDeploymentProperties { - return &OnlineDeploymentProperties{ - AppInsightsEnabled: k.AppInsightsEnabled, - EgressPublicNetworkAccess: k.EgressPublicNetworkAccess, - EndpointComputeType: k.EndpointComputeType, - InstanceType: k.InstanceType, - LivenessProbe: k.LivenessProbe, - Model: k.Model, - ModelMountPath: k.ModelMountPath, - ProvisioningState: k.ProvisioningState, - ReadinessProbe: k.ReadinessProbe, - RequestSettings: k.RequestSettings, - ScaleSettings: k.ScaleSettings, - CodeConfiguration: k.CodeConfiguration, - Description: k.Description, - EnvironmentID: k.EnvironmentID, - EnvironmentVariables: k.EnvironmentVariables, - Properties: k.Properties, - } -} - -// KubernetesProperties - Kubernetes properties -type KubernetesProperties struct { - // Default instance type - DefaultInstanceType *string - - // Extension instance release train. - ExtensionInstanceReleaseTrain *string - - // Extension principal-id. - ExtensionPrincipalID *string - - // Instance Type Schema - InstanceTypes map[string]*InstanceTypeSchema - - // Compute namespace - Namespace *string - - // Relay connection string. - RelayConnectionString *string - - // ServiceBus connection string. - ServiceBusConnectionString *string - - // VC name. - VcName *string -} - -// KubernetesSchema - Kubernetes Compute Schema -type KubernetesSchema struct { - // Properties of Kubernetes - Properties *KubernetesProperties +// GetOnlineDeploymentProperties implements the OnlineDeploymentPropertiesClassification interface for type OnlineDeploymentProperties. +func (o *OnlineDeploymentProperties) GetOnlineDeploymentProperties() *OnlineDeploymentProperties { + return o } -// ListAmlUserFeatureResult - The List Aml user feature operation response. -type ListAmlUserFeatureResult struct { - // READ-ONLY; The URI to fetch the next page of AML user features information. Call ListNext() with this to fetch the next - // page of AML user features information. +// OnlineDeploymentTrackedResourceArmPaginatedResult - A paginated list of OnlineDeployment entities. +type OnlineDeploymentTrackedResourceArmPaginatedResult struct { + // The link to the next page of OnlineDeployment objects. If null, there are no additional pages. NextLink *string - // READ-ONLY; The list of AML user facing features. - Value []*AmlUserFeature + // An array of objects of type OnlineDeployment. + Value []*OnlineDeployment } -type ListNotebookKeysResult struct { - // READ-ONLY - PrimaryAccessKey *string - - // READ-ONLY - SecondaryAccessKey *string +// OnlineDeploymentsClientBeginCreateOrUpdateOptions contains the optional parameters for the OnlineDeploymentsClient.BeginCreateOrUpdate +// method. 
+type OnlineDeploymentsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -type ListStorageAccountKeysResult struct { - // READ-ONLY - UserStorageKey *string +// OnlineDeploymentsClientBeginDeleteOptions contains the optional parameters for the OnlineDeploymentsClient.BeginDelete +// method. +type OnlineDeploymentsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// ListUsagesResult - The List Usages operation response. -type ListUsagesResult struct { - // READ-ONLY; The URI to fetch the next page of AML resource usage information. Call ListNext() with this to fetch the next - // page of AML resource usage information. - NextLink *string - - // READ-ONLY; The list of AML resource usages. - Value []*Usage +// OnlineDeploymentsClientBeginUpdateOptions contains the optional parameters for the OnlineDeploymentsClient.BeginUpdate +// method. +type OnlineDeploymentsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -type ListWorkspaceKeysResult struct { - // READ-ONLY - AppInsightsInstrumentationKey *string - - // READ-ONLY - ContainerRegistryCredentials *RegistryListCredentialsResult - - // READ-ONLY - NotebookAccessKeys *ListNotebookKeysResult - - // READ-ONLY - UserStorageKey *string - - // READ-ONLY - UserStorageResourceID *string +// OnlineDeploymentsClientGetLogsOptions contains the optional parameters for the OnlineDeploymentsClient.GetLogs method. +type OnlineDeploymentsClientGetLogsOptions struct { + // placeholder for future optional parameters } -// ListWorkspaceQuotas - The List WorkspaceQuotasByVMFamily operation response. -type ListWorkspaceQuotas struct { - // READ-ONLY; The URI to fetch the next page of workspace quota information by VM Family. Call ListNext() with this to fetch - // the next page of Workspace Quota information. - NextLink *string - - // READ-ONLY; The list of Workspace Quotas by VM Family - Value []*ResourceQuota +// OnlineDeploymentsClientGetOptions contains the optional parameters for the OnlineDeploymentsClient.Get method. +type OnlineDeploymentsClientGetOptions struct { + // placeholder for future optional parameters } -// LiteralJobInput - Literal input type. -type LiteralJobInput struct { - // REQUIRED; [Required] Specifies the type of job. - JobInputType *JobInputType - - // REQUIRED; [Required] Literal value for the input. - Value *string - - // Description for the input. - Description *string +// OnlineDeploymentsClientListOptions contains the optional parameters for the OnlineDeploymentsClient.NewListPager method. +type OnlineDeploymentsClientListOptions struct { + // Ordering of list. + OrderBy *string + // Continuation token for pagination. + Skip *string + // Top of list. + Top *int32 } -// GetJobInput implements the JobInputClassification interface for type LiteralJobInput. -func (l *LiteralJobInput) GetJobInput() *JobInput { - return &JobInput{ - Description: l.Description, - JobInputType: l.JobInputType, - } +// OnlineDeploymentsClientListSKUsOptions contains the optional parameters for the OnlineDeploymentsClient.NewListSKUsPager +// method. +type OnlineDeploymentsClientListSKUsOptions struct { + // Number of Skus to be retrieved in a page of results. + Count *int32 + // Continuation token for pagination. + Skip *string } -type MLFlowModelJobInput struct { - // REQUIRED; [Required] Specifies the type of job. 
- JobInputType *JobInputType +type OnlineEndpoint struct { + // REQUIRED; The geo-location where the resource lives + Location *string - // REQUIRED; [Required] Input Asset URI. - URI *string + // REQUIRED; [Required] Additional attributes of the entity. + Properties *OnlineEndpointProperties - // Description for the input. - Description *string + // Managed service identity (system assigned and/or user assigned identities) + Identity *ManagedServiceIdentity - // Input Asset Delivery Mode. - Mode *InputDeliveryMode -} + // Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type. + Kind *string -// GetJobInput implements the JobInputClassification interface for type MLFlowModelJobInput. -func (m *MLFlowModelJobInput) GetJobInput() *JobInput { - return &JobInput{ - Description: m.Description, - JobInputType: m.JobInputType, - } -} + // Sku details required for ARM contract for Autoscaling. + SKU *SKU -type MLFlowModelJobOutput struct { - // REQUIRED; [Required] Specifies the type of job. - JobOutputType *JobOutputType + // Resource tags. + Tags map[string]*string - // Description for the output. - Description *string + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string - // Output Asset Delivery Mode. - Mode *OutputDeliveryMode + // READ-ONLY; The name of the resource + Name *string - // Output Asset URI. - URI *string -} + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData -// GetJobOutput implements the JobOutputClassification interface for type MLFlowModelJobOutput. -func (m *MLFlowModelJobOutput) GetJobOutput() *JobOutput { - return &JobOutput{ - Description: m.Description, - JobOutputType: m.JobOutputType, - } + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string } -// MLTableData - MLTable data definition -type MLTableData struct { - // REQUIRED; [Required] Specifies the type of data. - DataType *DataType +// OnlineEndpointProperties - Online endpoint configuration +type OnlineEndpointProperties struct { + // REQUIRED; [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure Machine Learning token-based authentication. + // 'Key' doesn't expire but 'AMLToken' does. + AuthMode *EndpointAuthMode - // REQUIRED; [Required] Uri of the data. Usage/meaning depends on Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20221001.Assets.DataVersionBase.DataType - DataURI *string + // ARM resource ID of the compute if it exists. Optional. + Compute *string - // The asset description text. + // Description of the inference endpoint. Description *string - // If the name version are system generated (anonymous registration). - IsAnonymous *bool + // EndpointAuthKeys to set initially on an Endpoint. This property will always be returned as null. AuthKey values must be + // retrieved using the ListKeys API. + Keys *EndpointAuthKeys - // Is the asset archived? - IsArchived *bool + // Percentage of traffic to be mirrored to each deployment without using returned scoring. Traffic values need to sum to at most + // 50. + MirrorTraffic map[string]*int32 - // The asset property dictionary. + // Property dictionary. Properties can be added, but not removed or altered. 
Properties map[string]*string - // Uris referenced in the MLTable definition (required for lineage) - ReferencedUris []*string + // Set to "Enabled" for endpoints that should allow public access when Private Link is enabled. + PublicNetworkAccess *PublicNetworkAccessType - // Tag dictionary. Tags can be added, removed, and updated. - Tags map[string]*string -} + // Percentage of traffic from endpoint to divert to each deployment. Traffic values need to sum to 100. + Traffic map[string]*int32 -// GetDataVersionBaseProperties implements the DataVersionBasePropertiesClassification interface for type MLTableData. -func (m *MLTableData) GetDataVersionBaseProperties() *DataVersionBaseProperties { - return &DataVersionBaseProperties{ - DataType: m.DataType, - DataURI: m.DataURI, - IsAnonymous: m.IsAnonymous, - IsArchived: m.IsArchived, - Description: m.Description, - Properties: m.Properties, - Tags: m.Tags, - } -} + // READ-ONLY; Provisioning state for the endpoint. + ProvisioningState *EndpointProvisioningState -type MLTableJobInput struct { - // REQUIRED; [Required] Specifies the type of job. - JobInputType *JobInputType + // READ-ONLY; Endpoint URI. + ScoringURI *string - // REQUIRED; [Required] Input Asset URI. - URI *string + // READ-ONLY; Endpoint Swagger URI. + SwaggerURI *string +} - // Description for the input. - Description *string +// OnlineEndpointTrackedResourceArmPaginatedResult - A paginated list of OnlineEndpoint entities. +type OnlineEndpointTrackedResourceArmPaginatedResult struct { + // The link to the next page of OnlineEndpoint objects. If null, there are no additional pages. + NextLink *string - // Input Asset Delivery Mode. - Mode *InputDeliveryMode + // An array of objects of type OnlineEndpoint. + Value []*OnlineEndpoint } -// GetJobInput implements the JobInputClassification interface for type MLTableJobInput. -func (m *MLTableJobInput) GetJobInput() *JobInput { - return &JobInput{ - Description: m.Description, - JobInputType: m.JobInputType, - } +// OnlineEndpointsClientBeginCreateOrUpdateOptions contains the optional parameters for the OnlineEndpointsClient.BeginCreateOrUpdate +// method. +type OnlineEndpointsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -type MLTableJobOutput struct { - // REQUIRED; [Required] Specifies the type of job. - JobOutputType *JobOutputType - - // Description for the output. - Description *string - - // Output Asset Delivery Mode. - Mode *OutputDeliveryMode - - // Output Asset URI. - URI *string +// OnlineEndpointsClientBeginDeleteOptions contains the optional parameters for the OnlineEndpointsClient.BeginDelete method. +type OnlineEndpointsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// GetJobOutput implements the JobOutputClassification interface for type MLTableJobOutput. -func (m *MLTableJobOutput) GetJobOutput() *JobOutput { - return &JobOutput{ - Description: m.Description, - JobOutputType: m.JobOutputType, - } +// OnlineEndpointsClientBeginRegenerateKeysOptions contains the optional parameters for the OnlineEndpointsClient.BeginRegenerateKeys +// method. +type OnlineEndpointsClientBeginRegenerateKeysOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// ManagedIdentity - Managed identity configuration. -type ManagedIdentity struct { - // REQUIRED; [Required] Specifies the type of identity framework. 
- IdentityType *IdentityConfigurationType - - // Specifies a user-assigned identity by client ID. For system-assigned, do not set this field. - ClientID *string - - // Specifies a user-assigned identity by object ID. For system-assigned, do not set this field. - ObjectID *string - - // Specifies a user-assigned identity by ARM resource ID. For system-assigned, do not set this field. - ResourceID *string +// OnlineEndpointsClientBeginUpdateOptions contains the optional parameters for the OnlineEndpointsClient.BeginUpdate method. +type OnlineEndpointsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// GetIdentityConfiguration implements the IdentityConfigurationClassification interface for type ManagedIdentity. -func (m *ManagedIdentity) GetIdentityConfiguration() *IdentityConfiguration { - return &IdentityConfiguration{ - IdentityType: m.IdentityType, - } +// OnlineEndpointsClientGetOptions contains the optional parameters for the OnlineEndpointsClient.Get method. +type OnlineEndpointsClientGetOptions struct { + // placeholder for future optional parameters } -type ManagedIdentityAuthTypeWorkspaceConnectionProperties struct { - // REQUIRED; Authentication type of the connection target - AuthType *ConnectionAuthType - - // Category of the connection - Category *ConnectionCategory - Credentials *WorkspaceConnectionManagedIdentity - Target *string - - // Value details of the workspace connection. - Value *string - - // format for the workspace connection value - ValueFormat *ValueFormat +// OnlineEndpointsClientGetTokenOptions contains the optional parameters for the OnlineEndpointsClient.GetToken method. +type OnlineEndpointsClientGetTokenOptions struct { + // placeholder for future optional parameters } -// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type ManagedIdentityAuthTypeWorkspaceConnectionProperties. -func (m *ManagedIdentityAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { - return &WorkspaceConnectionPropertiesV2{ - AuthType: m.AuthType, - Category: m.Category, - Target: m.Target, - Value: m.Value, - ValueFormat: m.ValueFormat, - } +// OnlineEndpointsClientListKeysOptions contains the optional parameters for the OnlineEndpointsClient.ListKeys method. +type OnlineEndpointsClientListKeysOptions struct { + // placeholder for future optional parameters } -// ManagedOnlineDeployment - Properties specific to a ManagedOnlineDeployment. -type ManagedOnlineDeployment struct { - // REQUIRED; [Required] The compute type of the endpoint. - EndpointComputeType *EndpointComputeType - - // If true, enables Application Insights logging. - AppInsightsEnabled *bool - - // Code configuration for the endpoint deployment. - CodeConfiguration *CodeConfiguration - - // Description of the endpoint deployment. - Description *string +// OnlineEndpointsClientListOptions contains the optional parameters for the OnlineEndpointsClient.NewListPager method. +type OnlineEndpointsClientListOptions struct { + // EndpointComputeType to be filtered by. + ComputeType *EndpointComputeType + // Number of endpoints to be retrieved in a page of results. + Count *int32 + // Name of the endpoint. + Name *string + // The option to order the response. + OrderBy *OrderString + // A set of properties with which to filter the returned models. 
It is a comma separated string of properties key and/or properties + // key=value Example: propKey1,propKey2,propKey3=value3 . + Properties *string + // Continuation token for pagination. + Skip *string + // A set of tags with which to filter the returned models. It is a comma separated string of tags key or tags key=value. Example: + // tagKey1,tagKey2,tagKey3=value3 . + Tags *string +} - // If Enabled, allow egress public network access. If Disabled, this will create secure egress. Default: Enabled. - EgressPublicNetworkAccess *EgressPublicNetworkAccessType +// OnlineInferenceConfiguration - Online inference configuration options. +type OnlineInferenceConfiguration struct { + // Additional configurations + Configurations map[string]*string - // ARM resource ID or AssetId of the environment specification for the endpoint deployment. - EnvironmentID *string + // Entry script or command to invoke. + EntryScript *string - // Environment variables configuration for the deployment. - EnvironmentVariables map[string]*string + // The route to check the liveness of the inference server container. + LivenessRoute *Route - // Compute instance type. - InstanceType *string + // The route to check the readiness of the inference server container. + ReadinessRoute *Route - // Liveness probe monitors the health of the container regularly. - LivenessProbe *ProbeSettings + // The route, within the inference server container, to send the scoring requests to. + ScoringRoute *Route +} - // The URI path to the model. - Model *string +// OnlineRequestSettings - Online deployment scoring requests configuration. +type OnlineRequestSettings struct { + // The maximum number of concurrent requests allowed per node for this deployment. Defaults to 1. + MaxConcurrentRequestsPerInstance *int32 - // The path to mount the model in custom container. - ModelMountPath *string + // The maximum amount of time a request will stay in the queue in ISO 8601 format. Defaults to 500ms. + MaxQueueWait *string - // Property dictionary. Properties can be added, but not removed or altered. - Properties map[string]*string + // The scoring timeout in ISO 8601 format. Defaults to 5000ms. + RequestTimeout *string +} - // Readiness probe validates if the container is ready to serve traffic. The properties and defaults are the same as liveness - // probe. - ReadinessProbe *ProbeSettings +// OnlineScaleSettingsClassification provides polymorphic access to related types. +// Call the interface's GetOnlineScaleSettings() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *DefaultScaleSettings, *OnlineScaleSettings, *TargetUtilizationScaleSettings +type OnlineScaleSettingsClassification interface { + // GetOnlineScaleSettings returns the OnlineScaleSettings content of the underlying type. + GetOnlineScaleSettings() *OnlineScaleSettings +} - // Request settings for the deployment. - RequestSettings *OnlineRequestSettings +// OnlineScaleSettings - Online deployment scaling configuration. +type OnlineScaleSettings struct { + // REQUIRED; [Required] Type of deployment scaling algorithm + ScaleType *ScaleType +} - // Scale settings for the deployment. If it is null or not provided, it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment - // and to DefaultScaleSettings for ManagedOnlineDeployment. - ScaleSettings OnlineScaleSettingsClassification + +// GetOnlineScaleSettings implements the OnlineScaleSettingsClassification interface for type OnlineScaleSettings. 
+func (o *OnlineScaleSettings) GetOnlineScaleSettings() *OnlineScaleSettings { return o } - // READ-ONLY; Provisioning state for the endpoint deployment. - ProvisioningState *DeploymentProvisioningState -} +// OperationDisplay - Display name of operation +type OperationDisplay struct { + // Gets or sets the description for the operation. + Description *string -// GetOnlineDeploymentProperties implements the OnlineDeploymentPropertiesClassification interface for type ManagedOnlineDeployment. -func (m *ManagedOnlineDeployment) GetOnlineDeploymentProperties() *OnlineDeploymentProperties { - return &OnlineDeploymentProperties{ - AppInsightsEnabled: m.AppInsightsEnabled, - EgressPublicNetworkAccess: m.EgressPublicNetworkAccess, - EndpointComputeType: m.EndpointComputeType, - InstanceType: m.InstanceType, - LivenessProbe: m.LivenessProbe, - Model: m.Model, - ModelMountPath: m.ModelMountPath, - ProvisioningState: m.ProvisioningState, - ReadinessProbe: m.ReadinessProbe, - RequestSettings: m.RequestSettings, - ScaleSettings: m.ScaleSettings, - CodeConfiguration: m.CodeConfiguration, - Description: m.Description, - EnvironmentID: m.EnvironmentID, - EnvironmentVariables: m.EnvironmentVariables, - Properties: m.Properties, - } -} + // Gets or sets the operation that users can perform. + Operation *string -// ManagedServiceIdentity - Managed service identity (system assigned and/or user assigned identities) -type ManagedServiceIdentity struct { - // REQUIRED; Type of managed service identity (where both SystemAssigned and UserAssigned types are allowed). - Type *ManagedServiceIdentityType + // Gets or sets the resource provider name: Microsoft.MachineLearningExperimentation + Provider *string - // The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM - // resource ids in the form: - // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. - // The dictionary values can be empty objects ({}) in - // requests. - UserAssignedIdentities map[string]*UserAssignedIdentity + // Gets or sets the resource on which the operation is performed. + Resource *string +} - // READ-ONLY; The service principal ID of the system assigned identity. This property will only be provided for a system assigned - // identity. - PrincipalID *string +// OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. +type OperationsClientListOptions struct { + // placeholder for future optional parameters +} - // READ-ONLY; The tenant ID of the system assigned identity. This property will only be provided for a system assigned identity. - TenantID *string +// OutboundRuleClassification provides polymorphic access to related types. +// Call the interface's GetOutboundRule() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *FqdnOutboundRule, *OutboundRule, *PrivateEndpointOutboundRule, *ServiceTagOutboundRule +type OutboundRuleClassification interface { + // GetOutboundRule returns the OutboundRule content of the underlying type. 
+ GetOutboundRule() *OutboundRule } -// MedianStoppingPolicy - Defines an early termination policy based on running averages of the primary metric of all runs -type MedianStoppingPolicy struct { - // REQUIRED; [Required] Name of policy configuration - PolicyType *EarlyTerminationPolicyType +// OutboundRule - Outbound Rule for the managed network of a machine learning workspace. +type OutboundRule struct { + // REQUIRED; Type of a managed network Outbound Rule of a machine learning workspace. + Type *RuleType - // Number of intervals by which to delay the first evaluation. - DelayEvaluation *int32 + // Category of a managed network Outbound Rule of a machine learning workspace. + Category *RuleCategory - // Interval (number of runs) between policy evaluations. - EvaluationInterval *int32 + // Status of a managed network Outbound Rule of a machine learning workspace. + Status *RuleStatus } -// GetEarlyTerminationPolicy implements the EarlyTerminationPolicyClassification interface for type MedianStoppingPolicy. -func (m *MedianStoppingPolicy) GetEarlyTerminationPolicy() *EarlyTerminationPolicy { - return &EarlyTerminationPolicy{ - DelayEvaluation: m.DelayEvaluation, - EvaluationInterval: m.EvaluationInterval, - PolicyType: m.PolicyType, - } -} +// GetOutboundRule implements the OutboundRuleClassification interface for type OutboundRule. +func (o *OutboundRule) GetOutboundRule() *OutboundRule { return o } -// ModelContainer - Azure Resource Manager resource envelope. -type ModelContainer struct { - // REQUIRED; [Required] Additional attributes of the entity. - Properties *ModelContainerProperties +// OutboundRuleBasicResource - Outbound Rule Basic Resource for the managed network of a machine learning workspace. +type OutboundRuleBasicResource struct { + // REQUIRED; Outbound Rule for the managed network of a machine learning workspace. + Properties OutboundRuleClassification // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} ID *string @@ -5506,504 +9469,621 @@ type ModelContainer struct { Type *string } -type ModelContainerProperties struct { - // The asset description text. - Description *string +// OutboundRuleListResult - List of outbound rules for the managed network of a machine learning workspace. +type OutboundRuleListResult struct { + // The link to the next page constructed using the continuationToken. If null, there are no additional pages. + NextLink *string - // Is the asset archived? - IsArchived *bool + // The list of outbound rules. Since this list may be incomplete, the nextLink field should be used to request + // the next list of outbound rules. + Value []*OutboundRuleBasicResource +} - // The asset property dictionary. - Properties map[string]*string +// OutputPathAssetReference - Reference to an asset via its path in a job output. +type OutputPathAssetReference struct { + // REQUIRED; [Required] Specifies the type of asset reference. + ReferenceType *ReferenceType - // Tag dictionary. Tags can be added, removed, and updated. - Tags map[string]*string + // ARM resource ID of the job. + JobID *string - // READ-ONLY; The latest version inside this container. - LatestVersion *string + // The path of the file/directory in the job output. 
+ Path *string +} - // READ-ONLY; The next auto incremental version - NextVersion *string +// GetAssetReferenceBase implements the AssetReferenceBaseClassification interface for type OutputPathAssetReference. +func (o *OutputPathAssetReference) GetAssetReferenceBase() *AssetReferenceBase { + return &AssetReferenceBase{ + ReferenceType: o.ReferenceType, + } } -// ModelContainerResourceArmPaginatedResult - A paginated list of ModelContainer entities. -type ModelContainerResourceArmPaginatedResult struct { - // The link to the next page of ModelContainer objects. If null, there are no additional pages. - NextLink *string +type PATAuthTypeWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType - // An array of objects of type ModelContainer. - Value []*ModelContainer -} + // Category of the connection + Category *ConnectionCategory + Credentials *WorkspaceConnectionPersonalAccessToken + ExpiryTime *time.Time -// ModelContainersClientCreateOrUpdateOptions contains the optional parameters for the ModelContainersClient.CreateOrUpdate -// method. -type ModelContainersClientCreateOrUpdateOptions struct { - // placeholder for future optional parameters + // Anything + Metadata any + Target *string } -// ModelContainersClientDeleteOptions contains the optional parameters for the ModelContainersClient.Delete method. -type ModelContainersClientDeleteOptions struct { - // placeholder for future optional parameters +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type PATAuthTypeWorkspaceConnectionProperties. +func (p *PATAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: p.AuthType, + Category: p.Category, + ExpiryTime: p.ExpiryTime, + Metadata: p.Metadata, + Target: p.Target, + } } -// ModelContainersClientGetOptions contains the optional parameters for the ModelContainersClient.Get method. -type ModelContainersClientGetOptions struct { - // placeholder for future optional parameters +// PackageInputPathBaseClassification provides polymorphic access to related types. +// Call the interface's GetPackageInputPathBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *PackageInputPathBase, *PackageInputPathID, *PackageInputPathURL, *PackageInputPathVersion +type PackageInputPathBaseClassification interface { + // GetPackageInputPathBase returns the PackageInputPathBase content of the underlying type. + GetPackageInputPathBase() *PackageInputPathBase } -// ModelContainersClientListOptions contains the optional parameters for the ModelContainersClient.NewListPager method. -type ModelContainersClientListOptions struct { - // Maximum number of results to return. - Count *int32 - // View type for including/excluding (for example) archived entities. - ListViewType *ListViewType - // Continuation token for pagination. - Skip *string +type PackageInputPathBase struct { + // REQUIRED; [Required] Input path type for package inputs. + InputPathType *InputPathType } -// ModelVersion - Azure Resource Manager resource envelope. -type ModelVersion struct { - // REQUIRED; [Required] Additional attributes of the entity. - Properties *ModelVersionProperties +// GetPackageInputPathBase implements the PackageInputPathBaseClassification interface for type PackageInputPathBase. 
+func (p *PackageInputPathBase) GetPackageInputPathBase() *PackageInputPathBase { return p } - // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string +// PackageInputPathID - Package input path specified with a resource id. +type PackageInputPathID struct { + // REQUIRED; [Required] Input path type for package inputs. + InputPathType *InputPathType - // READ-ONLY; The name of the resource - Name *string + // Input resource id. + ResourceID *string +} - // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData +// GetPackageInputPathBase implements the PackageInputPathBaseClassification interface for type PackageInputPathID. +func (p *PackageInputPathID) GetPackageInputPathBase() *PackageInputPathBase { + return &PackageInputPathBase{ + InputPathType: p.InputPathType, + } +} - // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string +// PackageInputPathURL - Package input path specified as an url. +type PackageInputPathURL struct { + // REQUIRED; [Required] Input path type for package inputs. + InputPathType *InputPathType + + // Input path url. + URL *string } -// ModelVersionProperties - Model asset version details. -type ModelVersionProperties struct { - // The asset description text. - Description *string +// GetPackageInputPathBase implements the PackageInputPathBaseClassification interface for type PackageInputPathURL. +func (p *PackageInputPathURL) GetPackageInputPathBase() *PackageInputPathBase { + return &PackageInputPathBase{ + InputPathType: p.InputPathType, + } +} - // Mapping of model flavors to their properties. - Flavors map[string]*FlavorData +// PackageInputPathVersion - Package input path specified with name and version. +type PackageInputPathVersion struct { + // REQUIRED; [Required] Input path type for package inputs. + InputPathType *InputPathType - // If the name version are system generated (anonymous registration). - IsAnonymous *bool + // Input resource name. + ResourceName *string - // Is the asset archived? - IsArchived *bool + // Input resource version. + ResourceVersion *string +} - // Name of the training job which produced this model - JobName *string +// GetPackageInputPathBase implements the PackageInputPathBaseClassification interface for type PackageInputPathVersion. +func (p *PackageInputPathVersion) GetPackageInputPathBase() *PackageInputPathBase { + return &PackageInputPathBase{ + InputPathType: p.InputPathType, + } +} - // The storage format for this entity. Used for NCD. - ModelType *string +// PackageRequest - Model package operation request properties. +type PackageRequest struct { + // REQUIRED; [Required] Inferencing server configurations. + InferencingServer InferencingServerClassification - // The URI path to the model contents. - ModelURI *string + // REQUIRED; [Required] Arm ID of the target environment to be created by package operation. + TargetEnvironmentID *string - // The asset property dictionary. - Properties map[string]*string + // Base environment to start with. + BaseEnvironmentSource BaseEnvironmentSourceClassification + + // Collection of environment variables. + EnvironmentVariables map[string]*string + + // Collection of inputs. + Inputs []*ModelPackageInput + + // Model configuration including the mount mode. 
+ ModelConfiguration *ModelConfiguration // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string } -// ModelVersionResourceArmPaginatedResult - A paginated list of ModelVersion entities. -type ModelVersionResourceArmPaginatedResult struct { - // The link to the next page of ModelVersion objects. If null, there are no additional pages. - NextLink *string +// PackageResponse - Package response returned after async package operation completes successfully. +type PackageResponse struct { + // READ-ONLY; Base environment to start with. + BaseEnvironmentSource BaseEnvironmentSourceClassification - // An array of objects of type ModelVersion. - Value []*ModelVersion -} + // READ-ONLY; Build id of the image build operation. + BuildID *string -// ModelVersionsClientCreateOrUpdateOptions contains the optional parameters for the ModelVersionsClient.CreateOrUpdate method. -type ModelVersionsClientCreateOrUpdateOptions struct { - // placeholder for future optional parameters -} + // READ-ONLY; Build state of the image build operation. + BuildState *PackageBuildState -// ModelVersionsClientDeleteOptions contains the optional parameters for the ModelVersionsClient.Delete method. -type ModelVersionsClientDeleteOptions struct { - // placeholder for future optional parameters + // READ-ONLY; Collection of environment variables. + EnvironmentVariables map[string]*string + + // READ-ONLY; Inferencing server configurations. + InferencingServer InferencingServerClassification + + // READ-ONLY; Collection of inputs. + Inputs []*ModelPackageInput + + // READ-ONLY; Log url of the image build operation. + LogURL *string + + // READ-ONLY; Model configuration including the mount mode. + ModelConfiguration *ModelConfiguration + + // READ-ONLY; Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string + + // READ-ONLY; Asset ID of the target environment created by package operation. + TargetEnvironmentID *string } -// ModelVersionsClientGetOptions contains the optional parameters for the ModelVersionsClient.Get method. -type ModelVersionsClientGetOptions struct { - // placeholder for future optional parameters +// PaginatedComputeResourcesList - Paginated list of Machine Learning compute objects wrapped in ARM resource envelope. +type PaginatedComputeResourcesList struct { + // A continuation link (absolute URI) to the next page of results in the list. + NextLink *string + + // An array of Machine Learning compute objects wrapped in ARM resource envelope. + Value []*ComputeResource } -// ModelVersionsClientListOptions contains the optional parameters for the ModelVersionsClient.NewListPager method. -type ModelVersionsClientListOptions struct { - // Model description. +// PartialBatchDeployment - Mutable batch inference settings per deployment. +type PartialBatchDeployment struct { + // Description of the endpoint deployment. Description *string - // Name of the feed. - Feed *string - // View type for including/excluding (for example) archived entities. - ListViewType *ListViewType - // Number of initial results to skip. - Offset *int32 - // Ordering of list. - OrderBy *string - // Comma-separated list of property names (and optionally values). Example: prop1,prop2=value2 - Properties *string - // Continuation token for pagination. - Skip *string - // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 - Tags *string - // Maximum number of records to return. - Top *int32 - // Model version. 
- Version *string } -// Mpi - MPI distribution configuration. -type Mpi struct { - // REQUIRED; [Required] Specifies the type of distribution framework. - DistributionType *DistributionType +// PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties - Strictly used in update requests. +type PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties struct { + // Additional attributes of the entity. + Properties *PartialBatchDeployment - // Number of processes per MPI node. - ProcessCountPerInstance *int32 + // Resource tags. + Tags map[string]*string } -// GetDistributionConfiguration implements the DistributionConfigurationClassification interface for type Mpi. -func (m *Mpi) GetDistributionConfiguration() *DistributionConfiguration { - return &DistributionConfiguration{ - DistributionType: m.DistributionType, - } +// PartialJobBase - Mutable base definition for a job. +type PartialJobBase struct { + // Mutable notification setting for the job + NotificationSetting *PartialNotificationSetting } -// NCrossValidationsClassification provides polymorphic access to related types. -// Call the interface's GetNCrossValidations() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *AutoNCrossValidations, *CustomNCrossValidations, *NCrossValidations -type NCrossValidationsClassification interface { - // GetNCrossValidations returns the NCrossValidations content of the underlying type. - GetNCrossValidations() *NCrossValidations +// PartialJobBasePartialResource - Azure Resource Manager resource envelope strictly used in update requests. +type PartialJobBasePartialResource struct { + // Additional attributes of the entity. + Properties *PartialJobBase } -// NCrossValidations - N-Cross validations value. -type NCrossValidations struct { - // REQUIRED; [Required] Mode for determining N-Cross validations. - Mode *NCrossValidationsMode +// PartialManagedServiceIdentity - Managed service identity (system assigned and/or user assigned identities) +type PartialManagedServiceIdentity struct { + // Managed service identity (system assigned and/or user assigned identities) + Type *ManagedServiceIdentityType + + // The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM + // resource ids in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. + // The dictionary values can be empty objects ({}) in + // requests. + UserAssignedIdentities map[string]any } -// GetNCrossValidations implements the NCrossValidationsClassification interface for type NCrossValidations. -func (n *NCrossValidations) GetNCrossValidations() *NCrossValidations { return n } +// PartialMinimalTrackedResource - Strictly used in update requests. +type PartialMinimalTrackedResource struct { + // Resource tags. + Tags map[string]*string +} -// NlpVertical - Abstract class for NLP related AutoML tasks. NLP - Natural Language Processing. -type NlpVertical struct { - // Featurization inputs needed for AutoML job. - FeaturizationSettings *NlpVerticalFeaturizationSettings +// PartialMinimalTrackedResourceWithIdentity - Strictly used in update requests. +type PartialMinimalTrackedResourceWithIdentity struct { + // Managed service identity (system assigned and/or user assigned identities) + Identity *PartialManagedServiceIdentity + + // Resource tags. 
+ Tags map[string]*string +} - // Execution constraints for AutoMLJob. - LimitSettings *NlpVerticalLimitSettings +// PartialMinimalTrackedResourceWithSKU - Strictly used in update requests. +type PartialMinimalTrackedResourceWithSKU struct { + // Sku details required for ARM contract for Autoscaling. + SKU *PartialSKU - // Validation data inputs. - ValidationData *MLTableJobInput + // Resource tags. + Tags map[string]*string } -type NlpVerticalFeaturizationSettings struct { - // Dataset language, useful for the text data. - DatasetLanguage *string +// PartialNotificationSetting - Mutable configuration for notification. +type PartialNotificationSetting struct { + // Send webhook callback to a service. Key is a user-provided name for the webhook. + Webhooks map[string]WebhookClassification } -// NlpVerticalLimitSettings - Job execution constraints. -type NlpVerticalLimitSettings struct { - // Maximum Concurrent AutoML iterations. - MaxConcurrentTrials *int32 +// PartialRegistryPartialTrackedResource - Strictly used in update requests. +type PartialRegistryPartialTrackedResource struct { + // Managed service identity (system assigned and/or user assigned identities) + Identity *RegistryPartialManagedServiceIdentity - // Number of AutoML iterations. - MaxTrials *int32 + // Sku details required for ARM contract for Autoscaling. + SKU *PartialSKU - // AutoML job timeout. - Timeout *string + // Resource tags. + Tags map[string]*string } -// NodeStateCounts - Counts of various compute node states on the amlCompute. -type NodeStateCounts struct { - // READ-ONLY; Number of compute nodes in idle state. - IdleNodeCount *int32 - - // READ-ONLY; Number of compute nodes which are leaving the amlCompute. - LeavingNodeCount *int32 +// PartialSKU - Common SKU definition. +type PartialSKU struct { + // If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the + // resource this may be omitted. + Capacity *int32 - // READ-ONLY; Number of compute nodes which are in preempted state. - PreemptedNodeCount *int32 + // If the service has different generations of hardware, for the same SKU, then that can be captured here. + Family *string - // READ-ONLY; Number of compute nodes which are being prepared. - PreparingNodeCount *int32 + // The name of the SKU. Ex - P3. It is typically a letter+number code. + Name *string - // READ-ONLY; Number of compute nodes which are running jobs. - RunningNodeCount *int32 + // The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code. + Size *string - // READ-ONLY; Number of compute nodes which are in unusable state. - UnusableNodeCount *int32 + // This field is required to be implemented by the Resource Provider if the service has more than one tier, but is not required + // on a PUT. + Tier *SKUTier } -type NoneAuthTypeWorkspaceConnectionProperties struct { - // REQUIRED; Authentication type of the connection target - AuthType *ConnectionAuthType - - // Category of the connection - Category *ConnectionCategory - Target *string +type Password struct { + // READ-ONLY + Name *string - // Value details of the workspace connection. + // READ-ONLY Value *string - - // format for the workspace connection value - ValueFormat *ValueFormat } -// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type NoneAuthTypeWorkspaceConnectionProperties. 
-func (n *NoneAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { - return &WorkspaceConnectionPropertiesV2{ - AuthType: n.AuthType, - Category: n.Category, - Target: n.Target, - Value: n.Value, - ValueFormat: n.ValueFormat, - } +// PendingUploadCredentialDtoClassification provides polymorphic access to related types. +// Call the interface's GetPendingUploadCredentialDto() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *PendingUploadCredentialDto, *SASCredentialDto +type PendingUploadCredentialDtoClassification interface { + // GetPendingUploadCredentialDto returns the PendingUploadCredentialDto content of the underlying type. + GetPendingUploadCredentialDto() *PendingUploadCredentialDto } -// NoneDatastoreCredentials - Empty/none datastore credentials. -type NoneDatastoreCredentials struct { +type PendingUploadCredentialDto struct { // REQUIRED; [Required] Credential type used to authentication with storage. - CredentialsType *CredentialsType + CredentialType *PendingUploadCredentialType } -// GetDatastoreCredentials implements the DatastoreCredentialsClassification interface for type NoneDatastoreCredentials. -func (n *NoneDatastoreCredentials) GetDatastoreCredentials() *DatastoreCredentials { - return &DatastoreCredentials{ - CredentialsType: n.CredentialsType, - } +// GetPendingUploadCredentialDto implements the PendingUploadCredentialDtoClassification interface for type PendingUploadCredentialDto. +func (p *PendingUploadCredentialDto) GetPendingUploadCredentialDto() *PendingUploadCredentialDto { + return p } -type NotebookAccessTokenResult struct { - // READ-ONLY - AccessToken *string +type PendingUploadRequestDto struct { + // If PendingUploadId = null then random guid will be used. + PendingUploadID *string - // READ-ONLY - ExpiresIn *int32 + // TemporaryBlobReference is the only supported type + PendingUploadType *PendingUploadType +} - // READ-ONLY - HostName *string +type PendingUploadResponseDto struct { + // Container level read, write, list SAS + BlobReferenceForConsumption *BlobReferenceForConsumptionDto - // READ-ONLY - NotebookResourceID *string + // ID for this upload request + PendingUploadID *string - // READ-ONLY - PublicDNS *string + // TemporaryBlobReference is the only supported type + PendingUploadType *PendingUploadType +} - // READ-ONLY - RefreshToken *string +// PersonalComputeInstanceSettings - Settings for a personal compute instance. +type PersonalComputeInstanceSettings struct { + // A user explicitly assigned to a personal compute instance. + AssignedUser *AssignedUser +} - // READ-ONLY - Scope *string +// PipelineJob - Pipeline Job definition: defines generic to MFE attributes. +type PipelineJob struct { + // REQUIRED; [Required] Specifies the type of job. + JobType *JobType - // READ-ONLY - TokenType *string -} + // ARM resource ID of the component resource. + ComponentID *string -type NotebookPreparationError struct { - ErrorMessage *string - StatusCode *int32 -} + // ARM resource ID of the compute resource. + ComputeID *string -type NotebookResourceInfo struct { - Fqdn *string + // The asset description text. + Description *string - // The error that occurs when preparing notebook. - NotebookPreparationError *NotebookPreparationError + // Display name of job. 
+ DisplayName *string - // the data plane resourceId that used to initialize notebook component - ResourceID *string -} + // The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment. + ExperimentName *string -// Objective - Optimization objective. -type Objective struct { - // REQUIRED; [Required] Defines supported metric goals for hyperparameter tuning - Goal *Goal + // Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken + // if null. + Identity IdentityConfigurationClassification - // REQUIRED; [Required] Name of the metric to optimize. - PrimaryMetric *string -} + // Inputs for the pipeline job. + Inputs map[string]JobInputClassification -type OnlineDeployment struct { - // REQUIRED; The geo-location where the resource lives - Location *string + // Is the asset archived? + IsArchived *bool - // REQUIRED; [Required] Additional attributes of the entity. - Properties OnlineDeploymentPropertiesClassification + // Jobs construct the Pipeline Job. + Jobs map[string]any - // Managed service identity (system assigned and/or user assigned identities) - Identity *ManagedServiceIdentity + // Notification setting for the job + NotificationSetting *NotificationSetting - // Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type. - Kind *string + // Outputs for the pipeline job + Outputs map[string]JobOutputClassification - // Sku details required for ARM contract for Autoscaling. - SKU *SKU + // The asset property dictionary. + Properties map[string]*string - // Resource tags. - Tags map[string]*string + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration - // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. + Services map[string]*JobService - // READ-ONLY; The name of the resource - Name *string + // Pipeline settings, for things like ContinueRunOnStepFailure etc. + Settings any - // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData + // ARM resource ID of source job. + SourceJobID *string - // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string -} + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string -// OnlineDeploymentPropertiesClassification provides polymorphic access to related types. -// Call the interface's GetOnlineDeploymentProperties() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *KubernetesOnlineDeployment, *ManagedOnlineDeployment, *OnlineDeploymentProperties -type OnlineDeploymentPropertiesClassification interface { - // GetOnlineDeploymentProperties returns the OnlineDeploymentProperties content of the underlying type. - GetOnlineDeploymentProperties() *OnlineDeploymentProperties + // READ-ONLY; Status of the job. + Status *JobStatus } -type OnlineDeploymentProperties struct { - // REQUIRED; [Required] The compute type of the endpoint. 
- EndpointComputeType *EndpointComputeType +// GetJobBaseProperties implements the JobBasePropertiesClassification interface for type PipelineJob. +func (p *PipelineJob) GetJobBaseProperties() *JobBaseProperties { + return &JobBaseProperties{ + ComponentID: p.ComponentID, + ComputeID: p.ComputeID, + DisplayName: p.DisplayName, + ExperimentName: p.ExperimentName, + Identity: p.Identity, + IsArchived: p.IsArchived, + JobType: p.JobType, + NotificationSetting: p.NotificationSetting, + SecretsConfiguration: p.SecretsConfiguration, + Services: p.Services, + Status: p.Status, + Description: p.Description, + Properties: p.Properties, + Tags: p.Tags, + } +} - // If true, enables Application Insights logging. - AppInsightsEnabled *bool +// PredictionDriftMetricThresholdBaseClassification provides polymorphic access to related types. +// Call the interface's GetPredictionDriftMetricThresholdBase() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *CategoricalPredictionDriftMetricThreshold, *NumericalPredictionDriftMetricThreshold, *PredictionDriftMetricThresholdBase +type PredictionDriftMetricThresholdBaseClassification interface { + // GetPredictionDriftMetricThresholdBase returns the PredictionDriftMetricThresholdBase content of the underlying type. + GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase +} - // Code configuration for the endpoint deployment. - CodeConfiguration *CodeConfiguration +type PredictionDriftMetricThresholdBase struct { + // REQUIRED; [Required] Specifies the data type of the metric threshold. + DataType *MonitoringFeatureDataType - // Description of the endpoint deployment. - Description *string + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} - // If Enabled, allow egress public network access. If Disabled, this will create secure egress. Default: Enabled. - EgressPublicNetworkAccess *EgressPublicNetworkAccessType +// GetPredictionDriftMetricThresholdBase implements the PredictionDriftMetricThresholdBaseClassification interface for type +// PredictionDriftMetricThresholdBase. +func (p *PredictionDriftMetricThresholdBase) GetPredictionDriftMetricThresholdBase() *PredictionDriftMetricThresholdBase { + return p +} - // ARM resource ID or AssetId of the environment specification for the endpoint deployment. - EnvironmentID *string +type PredictionDriftMonitoringSignal struct { + // REQUIRED; [Required] A list of metrics to calculate and their associated thresholds. + MetricThresholds []PredictionDriftMetricThresholdBaseClassification - // Environment variables configuration for the deployment. - EnvironmentVariables map[string]*string + // REQUIRED; [Required] The type of the model monitored. + ModelType *MonitoringModelType - // Compute instance type. - InstanceType *string + // REQUIRED; [Required] The data which drift will be calculated for. + ProductionData MonitoringInputDataBaseClassification - // Liveness probe monitors the health of the container regularly. - LivenessProbe *ProbeSettings + // REQUIRED; [Required] The data to calculate drift against. + ReferenceData MonitoringInputDataBaseClassification - // The URI path to the model. - Model *string + // REQUIRED; [Required] Specifies the type of signal to monitor. + SignalType *MonitoringSignalType - // The path to mount the model in custom container. - ModelMountPath *string + // The current notification mode for this signal. 
+ Mode *MonitoringNotificationMode // Property dictionary. Properties can be added, but not removed or altered. Properties map[string]*string +} - // Readiness probe validates if the container is ready to serve traffic. The properties and defaults are the same as liveness - // probe. - ReadinessProbe *ProbeSettings +// GetMonitoringSignalBase implements the MonitoringSignalBaseClassification interface for type PredictionDriftMonitoringSignal. +func (p *PredictionDriftMonitoringSignal) GetMonitoringSignalBase() *MonitoringSignalBase { + return &MonitoringSignalBase{ + Mode: p.Mode, + Properties: p.Properties, + SignalType: p.SignalType, + } +} - // Request settings for the deployment. - RequestSettings *OnlineRequestSettings +// PrivateEndpoint - The Private Endpoint resource. +type PrivateEndpoint struct { + // READ-ONLY; The ARM identifier for Private Endpoint + ID *string +} - // Scale settings for the deployment. If it is null or not provided, it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment - // and to DefaultScaleSettings for ManagedOnlineDeployment. - ScaleSettings OnlineScaleSettingsClassification +// PrivateEndpointConnection - The Private Endpoint Connection resource. +type PrivateEndpointConnection struct { + // Managed service identity (system assigned and/or user assigned identities) + Identity *ManagedServiceIdentity + + // Same as workspace location. + Location *string + + // Private endpoint connection properties. + Properties *PrivateEndpointConnectionProperties + + // Optional. This field is required to be implemented by the RP because AML is supporting more than one tier + SKU *SKU + + // Dictionary of + Tags map[string]*string + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData - // READ-ONLY; Provisioning state for the endpoint deployment. - ProvisioningState *DeploymentProvisioningState + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string } -// GetOnlineDeploymentProperties implements the OnlineDeploymentPropertiesClassification interface for type OnlineDeploymentProperties. -func (o *OnlineDeploymentProperties) GetOnlineDeploymentProperties() *OnlineDeploymentProperties { - return o +// PrivateEndpointConnectionListResult - List of private endpoint connection associated with the specified workspace +type PrivateEndpointConnectionListResult struct { + // Array of private endpoint connections + Value []*PrivateEndpointConnection } -// OnlineDeploymentTrackedResourceArmPaginatedResult - A paginated list of OnlineDeployment entities. -type OnlineDeploymentTrackedResourceArmPaginatedResult struct { - // The link to the next page of OnlineDeployment objects. If null, there are no additional pages. - NextLink *string +// PrivateEndpointConnectionProperties - Private endpoint connection properties. +type PrivateEndpointConnectionProperties struct { + // The Private Endpoint resource. + PrivateEndpoint *WorkspacePrivateEndpointResource - // An array of objects of type OnlineDeployment. - Value []*OnlineDeployment + // The connection state. 
+ PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState + + // READ-ONLY; The current provisioning state. + ProvisioningState *PrivateEndpointConnectionProvisioningState } -// OnlineDeploymentsClientBeginCreateOrUpdateOptions contains the optional parameters for the OnlineDeploymentsClient.BeginCreateOrUpdate +// PrivateEndpointConnectionsClientCreateOrUpdateOptions contains the optional parameters for the PrivateEndpointConnectionsClient.CreateOrUpdate // method. -type OnlineDeploymentsClientBeginCreateOrUpdateOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string +type PrivateEndpointConnectionsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters } -// OnlineDeploymentsClientBeginDeleteOptions contains the optional parameters for the OnlineDeploymentsClient.BeginDelete +// PrivateEndpointConnectionsClientDeleteOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Delete // method. -type OnlineDeploymentsClientBeginDeleteOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string +type PrivateEndpointConnectionsClientDeleteOptions struct { + // placeholder for future optional parameters } -// OnlineDeploymentsClientBeginUpdateOptions contains the optional parameters for the OnlineDeploymentsClient.BeginUpdate +// PrivateEndpointConnectionsClientGetOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Get // method. -type OnlineDeploymentsClientBeginUpdateOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string +type PrivateEndpointConnectionsClientGetOptions struct { + // placeholder for future optional parameters } -// OnlineDeploymentsClientGetLogsOptions contains the optional parameters for the OnlineDeploymentsClient.GetLogs method. -type OnlineDeploymentsClientGetLogsOptions struct { +// PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.NewListPager +// method. +type PrivateEndpointConnectionsClientListOptions struct { // placeholder for future optional parameters } -// OnlineDeploymentsClientGetOptions contains the optional parameters for the OnlineDeploymentsClient.Get method. -type OnlineDeploymentsClientGetOptions struct { - // placeholder for future optional parameters +// PrivateEndpointDestination - Private Endpoint destination for a Private Endpoint Outbound Rule for the managed network +// of a machine learning workspace. +type PrivateEndpointDestination struct { + ServiceResourceID *string + SparkEnabled *bool + + // Type of a managed network Outbound Rule of a machine learning workspace. + SparkStatus *RuleStatus + SubresourceTarget *string } -// OnlineDeploymentsClientListOptions contains the optional parameters for the OnlineDeploymentsClient.NewListPager method. -type OnlineDeploymentsClientListOptions struct { - // Ordering of list. - OrderBy *string - // Continuation token for pagination. - Skip *string - // Top of list. - Top *int32 +// PrivateEndpointOutboundRule - Private Endpoint Outbound Rule for the managed network of a machine learning workspace. +type PrivateEndpointOutboundRule struct { + // REQUIRED; Type of a managed network Outbound Rule of a machine learning workspace. + Type *RuleType + + // Category of a managed network Outbound Rule of a machine learning workspace. 
+ Category *RuleCategory + + // Private Endpoint destination for a Private Endpoint Outbound Rule for the managed network of a machine learning workspace. + Destination *PrivateEndpointDestination + + // Type of a managed network Outbound Rule of a machine learning workspace. + Status *RuleStatus } -// OnlineDeploymentsClientListSKUsOptions contains the optional parameters for the OnlineDeploymentsClient.NewListSKUsPager -// method. -type OnlineDeploymentsClientListSKUsOptions struct { - // Number of Skus to be retrieved in a page of results. - Count *int32 - // Continuation token for pagination. - Skip *string +// GetOutboundRule implements the OutboundRuleClassification interface for type PrivateEndpointOutboundRule. +func (p *PrivateEndpointOutboundRule) GetOutboundRule() *OutboundRule { + return &OutboundRule{ + Category: p.Category, + Status: p.Status, + Type: p.Type, + } } -type OnlineEndpoint struct { - // REQUIRED; The geo-location where the resource lives - Location *string +// PrivateEndpointResource - The PE network resource that is linked to this PE connection. +type PrivateEndpointResource struct { + // The subnetId that the private endpoint is connected to. + SubnetArmID *string - // REQUIRED; [Required] Additional attributes of the entity. - Properties *OnlineEndpointProperties + // READ-ONLY; The ARM identifier for Private Endpoint + ID *string +} +// PrivateLinkResource - A private link resource +type PrivateLinkResource struct { // Managed service identity (system assigned and/or user assigned identities) Identity *ManagedServiceIdentity - // Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type. - Kind *string + // Same as workspace location. + Location *string - // Sku details required for ARM contract for Autoscaling. + // Properties of a private link resource. + Properties *PrivateLinkResourceProperties + + // Optional. This field is required to be implemented by the RP because AML is supporting more than one tier SKU *SKU - // Resource tags. + // Dictionary of Tags map[string]*string // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} @@ -6019,384 +10099,321 @@ type OnlineEndpoint struct { Type *string } -// OnlineEndpointProperties - Online endpoint configuration -type OnlineEndpointProperties struct { - // REQUIRED; [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure Machine Learning token-based authentication. - // 'Key' doesn't expire but 'AMLToken' does. - AuthMode *EndpointAuthMode - - // ARM resource ID of the compute if it exists. optional - Compute *string - - // Description of the inference endpoint. - Description *string - - // EndpointAuthKeys to set initially on an Endpoint. This property will always be returned as null. AuthKey values must be - // retrieved using the ListKeys API. - Keys *EndpointAuthKeys - - // Property dictionary. Properties can be added, but not removed or altered. - Properties map[string]*string - - // Set to "Enabled" for endpoints that should allow public access when Private Link is enabled. - PublicNetworkAccess *PublicNetworkAccessType - - // Percentage of traffic from endpoint to divert to each deployment. Traffic values need to sum to 100. - Traffic map[string]*int32 - - // READ-ONLY; Provisioning state for the endpoint. - ProvisioningState *EndpointProvisioningState - - // READ-ONLY; Endpoint URI. 
- ScoringURI *string - - // READ-ONLY; Endpoint Swagger URI. - SwaggerURI *string +// PrivateLinkResourceListResult - A list of private link resources +type PrivateLinkResourceListResult struct { + Value []*PrivateLinkResource } -// OnlineEndpointTrackedResourceArmPaginatedResult - A paginated list of OnlineEndpoint entities. -type OnlineEndpointTrackedResourceArmPaginatedResult struct { - // The link to the next page of OnlineEndpoint objects. If null, there are no additional pages. - NextLink *string - - // An array of objects of type OnlineEndpoint. - Value []*OnlineEndpoint -} +// PrivateLinkResourceProperties - Properties of a private link resource. +type PrivateLinkResourceProperties struct { + // The private link resource group id. + GroupID *string -// OnlineEndpointsClientBeginCreateOrUpdateOptions contains the optional parameters for the OnlineEndpointsClient.BeginCreateOrUpdate -// method. -type OnlineEndpointsClientBeginCreateOrUpdateOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string -} + // The private link resource required member names. + RequiredMembers []*string -// OnlineEndpointsClientBeginDeleteOptions contains the optional parameters for the OnlineEndpointsClient.BeginDelete method. -type OnlineEndpointsClientBeginDeleteOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string + // The private link resource Private link DNS zone name. + RequiredZoneNames []*string } -// OnlineEndpointsClientBeginRegenerateKeysOptions contains the optional parameters for the OnlineEndpointsClient.BeginRegenerateKeys +// PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.NewListPager // method. -type OnlineEndpointsClientBeginRegenerateKeysOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string +type PrivateLinkResourcesClientListOptions struct { + // placeholder for future optional parameters } -// OnlineEndpointsClientBeginUpdateOptions contains the optional parameters for the OnlineEndpointsClient.BeginUpdate method. -type OnlineEndpointsClientBeginUpdateOptions struct { - // Resumes the LRO from the provided token. - ResumeToken string -} +// PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer +// and provider. +type PrivateLinkServiceConnectionState struct { + // Some RP chose "None". Other RPs use this for region expansion. + ActionsRequired *string -// OnlineEndpointsClientGetOptions contains the optional parameters for the OnlineEndpointsClient.Get method. -type OnlineEndpointsClientGetOptions struct { - // placeholder for future optional parameters -} + // User-defined message that, per NRP doc, may be used for approval-related message. + Description *string -// OnlineEndpointsClientGetTokenOptions contains the optional parameters for the OnlineEndpointsClient.GetToken method. -type OnlineEndpointsClientGetTokenOptions struct { - // placeholder for future optional parameters + // Connection status of the service consumer with the service provider + Status *EndpointServiceConnectionStatus } -// OnlineEndpointsClientListKeysOptions contains the optional parameters for the OnlineEndpointsClient.ListKeys method. -type OnlineEndpointsClientListKeysOptions struct { - // placeholder for future optional parameters -} +// ProbeSettings - Deployment container liveness/readiness probe configuration. 
+type ProbeSettings struct { + // The number of failures to allow before returning an unhealthy status. + FailureThreshold *int32 -// OnlineEndpointsClientListOptions contains the optional parameters for the OnlineEndpointsClient.NewListPager method. -type OnlineEndpointsClientListOptions struct { - // EndpointComputeType to be filtered by. - ComputeType *EndpointComputeType - // Number of endpoints to be retrieved in a page of results. - Count *int32 - // Name of the endpoint. - Name *string - // The option to order the response. - OrderBy *OrderString - // A set of properties with which to filter the returned models. It is a comma separated string of properties key and/or properties - // key=value Example: propKey1,propKey2,propKey3=value3 . - Properties *string - // Continuation token for pagination. - Skip *string - // A set of tags with which to filter the returned models. It is a comma separated string of tags key or tags key=value. Example: - // tagKey1,tagKey2,tagKey3=value3 . - Tags *string -} + // The delay before the first probe in ISO 8601 format. + InitialDelay *string -// OnlineRequestSettings - Online deployment scoring requests configuration. -type OnlineRequestSettings struct { - // The number of maximum concurrent requests per node allowed per deployment. Defaults to 1. - MaxConcurrentRequestsPerInstance *int32 + // The length of time between probes in ISO 8601 format. + Period *string - // The maximum amount of time a request will stay in the queue in ISO 8601 format. Defaults to 500ms. - MaxQueueWait *string + // The number of successful probes before returning a healthy status. + SuccessThreshold *int32 - // The scoring timeout in ISO 8601 format. Defaults to 5000ms. - RequestTimeout *string + // The probe timeout in ISO 8601 format. + Timeout *string } -// OnlineScaleSettingsClassification provides polymorphic access to related types. -// Call the interface's GetOnlineScaleSettings() method to access the common type. -// Use a type switch to determine the concrete type. The possible types are: -// - *DefaultScaleSettings, *OnlineScaleSettings, *TargetUtilizationScaleSettings -type OnlineScaleSettingsClassification interface { - // GetOnlineScaleSettings returns the OnlineScaleSettings content of the underlying type. - GetOnlineScaleSettings() *OnlineScaleSettings -} +// ProgressMetrics - Progress metrics definition +type ProgressMetrics struct { + // READ-ONLY; The completed datapoint count. + CompletedDatapointCount *int64 -// OnlineScaleSettings - Online deployment scaling configuration. -type OnlineScaleSettings struct { - // REQUIRED; [Required] Type of deployment scaling algorithm - ScaleType *ScaleType -} + // READ-ONLY; The time of last successful incremental data refresh in UTC. + IncrementalDataLastRefreshDateTime *time.Time -// GetOnlineScaleSettings implements the OnlineScaleSettingsClassification interface for type OnlineScaleSettings. -func (o *OnlineScaleSettings) GetOnlineScaleSettings() *OnlineScaleSettings { return o } + // READ-ONLY; The skipped datapoint count. + SkippedDatapointCount *int64 -// OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. -type OperationsClientListOptions struct { - // placeholder for future optional parameters + // READ-ONLY; The total datapoint count. + TotalDatapointCount *int64 } -// OutputPathAssetReference - Reference to an asset via its path in a job output. -type OutputPathAssetReference struct { - // REQUIRED; [Required] Specifies the type of asset reference. 
- ReferenceType *ReferenceType - - // ARM resource ID of the job. - JobID *string +// PyTorch distribution configuration. +type PyTorch struct { + // REQUIRED; [Required] Specifies the type of distribution framework. + DistributionType *DistributionType - // The path of the file/directory in the job output. - Path *string + // Number of processes per node. + ProcessCountPerInstance *int32 } -// GetAssetReferenceBase implements the AssetReferenceBaseClassification interface for type OutputPathAssetReference. -func (o *OutputPathAssetReference) GetAssetReferenceBase() *AssetReferenceBase { - return &AssetReferenceBase{ - ReferenceType: o.ReferenceType, +// GetDistributionConfiguration implements the DistributionConfigurationClassification interface for type PyTorch. +func (p *PyTorch) GetDistributionConfiguration() *DistributionConfiguration { + return &DistributionConfiguration{ + DistributionType: p.DistributionType, } } -type PATAuthTypeWorkspaceConnectionProperties struct { - // REQUIRED; Authentication type of the connection target - AuthType *ConnectionAuthType +type QueueSettings struct { + // Controls the compute job tier + JobTier *JobTier - // Category of the connection - Category *ConnectionCategory - Credentials *WorkspaceConnectionPersonalAccessToken - Target *string + // Controls the priority of the job on a compute. + Priority *int32 +} - // Value details of the workspace connection. - Value *string +// QuotaBaseProperties - The properties for Quota update or retrieval. +type QuotaBaseProperties struct { + // Specifies the resource ID. + ID *string - // format for the workspace connection value - ValueFormat *ValueFormat -} + // The maximum permitted quota of the resource. + Limit *int64 -// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type PATAuthTypeWorkspaceConnectionProperties. -func (p *PATAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { - return &WorkspaceConnectionPropertiesV2{ - AuthType: p.AuthType, - Category: p.Category, - Target: p.Target, - Value: p.Value, - ValueFormat: p.ValueFormat, - } + // Specifies the resource type. + Type *string + + // An enum describing the unit of quota measurement. + Unit *QuotaUnit } -// PaginatedComputeResourcesList - Paginated list of Machine Learning compute objects wrapped in ARM resource envelope. -type PaginatedComputeResourcesList struct { - // A continuation link (absolute URI) to the next page of results in the list. - NextLink *string +// QuotaUpdateParameters - Quota update parameters. +type QuotaUpdateParameters struct { + // Region of workspace quota to be updated. + Location *string - // An array of Machine Learning compute objects wrapped in ARM resource envelope. - Value []*ComputeResource + // The list for update quota. + Value []*QuotaBaseProperties } -// PartialBatchDeployment - Mutable batch inference settings per deployment. -type PartialBatchDeployment struct { - // Description of the endpoint deployment. - Description *string +// QuotasClientListOptions contains the optional parameters for the QuotasClient.NewListPager method. +type QuotasClientListOptions struct { + // placeholder for future optional parameters } -// PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties - Strictly used in update requests. -type PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties struct { - // Additional attributes of the entity. 
- Properties *PartialBatchDeployment - - // Resource tags. - Tags map[string]*string +// QuotasClientUpdateOptions contains the optional parameters for the QuotasClient.Update method. +type QuotasClientUpdateOptions struct { + // placeholder for future optional parameters } -// PartialManagedServiceIdentity - Managed service identity (system assigned and/or user assigned identities) -type PartialManagedServiceIdentity struct { - // Managed service identity (system assigned and/or user assigned identities) - Type *ManagedServiceIdentityType +// RandomSamplingAlgorithm - Defines a Sampling Algorithm that generates values randomly +type RandomSamplingAlgorithm struct { + // REQUIRED; [Required] The algorithm used for generating hyperparameter values, along with configuration properties + SamplingAlgorithmType *SamplingAlgorithmType - // The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM - // resource ids in the form: - // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. - // The dictionary values can be empty objects ({}) in - // requests. - UserAssignedIdentities map[string]any -} + // An optional positive number or e in string format to be used as base for log based random sampling + Logbase *string -// PartialMinimalTrackedResource - Strictly used in update requests. -type PartialMinimalTrackedResource struct { - // Resource tags. - Tags map[string]*string -} + // The specific type of random algorithm + Rule *RandomSamplingAlgorithmRule -// PartialMinimalTrackedResourceWithIdentity - Strictly used in update requests. -type PartialMinimalTrackedResourceWithIdentity struct { - // Managed service identity (system assigned and/or user assigned identities) - Identity *PartialManagedServiceIdentity + // An optional integer to use as the seed for random number generation + Seed *int32 +} - // Resource tags. - Tags map[string]*string +// GetSamplingAlgorithm implements the SamplingAlgorithmClassification interface for type RandomSamplingAlgorithm. +func (r *RandomSamplingAlgorithm) GetSamplingAlgorithm() *SamplingAlgorithm { + return &SamplingAlgorithm{ + SamplingAlgorithmType: r.SamplingAlgorithmType, + } } -// PartialMinimalTrackedResourceWithSKU - Strictly used in update requests. -type PartialMinimalTrackedResourceWithSKU struct { - // Sku details required for ARM contract for Autoscaling. - SKU *PartialSKU +// Ray distribution configuration. +type Ray struct { + // REQUIRED; [Required] Specifies the type of distribution framework. + DistributionType *DistributionType - // Resource tags. - Tags map[string]*string -} + // The address of Ray head node. + Address *string -// PartialSKU - Common SKU definition. -type PartialSKU struct { - // If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the - // resource this may be omitted. - Capacity *int32 + // The port to bind the dashboard server to. + DashboardPort *int32 - // If the service has different generations of hardware, for the same SKU, then that can be captured here. - Family *string + // Additional arguments passed to ray start in head node. + HeadNodeAdditionalArgs *string - // The name of the SKU. Ex - P3. It is typically a letter+number code. - Name *string + // Provide this argument to start the Ray dashboard GUI. + IncludeDashboard *bool - // The SKU size. 
When the name field is the combination of tier and some other value, this would be the standalone code. - Size *string + // The port of the head ray process. + Port *int32 - // This field is required to be implemented by the Resource Provider if the service has more than one tier, but is not required - // on a PUT. - Tier *SKUTier + // Additional arguments passed to ray start in worker node. + WorkerNodeAdditionalArgs *string } -type Password struct { - // READ-ONLY - Name *string - - // READ-ONLY - Value *string +// GetDistributionConfiguration implements the DistributionConfigurationClassification interface for type Ray. +func (r *Ray) GetDistributionConfiguration() *DistributionConfiguration { + return &DistributionConfiguration{ + DistributionType: r.DistributionType, + } } -// PersonalComputeInstanceSettings - Settings for a personal compute instance. -type PersonalComputeInstanceSettings struct { - // A user explicitly assigned to a personal compute instance. - AssignedUser *AssignedUser +// Recurrence - The workflow trigger recurrence for ComputeStartStop schedule type. +type Recurrence struct { + // [Required] The frequency to trigger schedule. + Frequency *RecurrenceFrequency + + // [Required] Specifies schedule interval in conjunction with frequency + Interval *int32 + + // [Required] The recurrence schedule. + Schedule *RecurrenceSchedule + + // The start time in yyyy-MM-ddTHH:mm:ss format. + StartTime *string + + // Specifies time zone in which the schedule runs. TimeZone should follow Windows time zone format. Refer: + // https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11 + TimeZone *string } -// PipelineJob - Pipeline Job definition: defines generic to MFE attributes. -type PipelineJob struct { - // REQUIRED; [Required] Specifies the type of job. - JobType *JobType +type RecurrenceSchedule struct { + // REQUIRED; [Required] List of hours for the schedule. + Hours []*int32 - // ARM resource ID of the component resource. - ComponentID *string + // REQUIRED; [Required] List of minutes for the schedule. + Minutes []*int32 - // ARM resource ID of the compute resource. - ComputeID *string + // List of month days for the schedule + MonthDays []*int32 - // The asset description text. - Description *string + // List of days for the schedule. + WeekDays []*WeekDay +} - // Display name of job. - DisplayName *string +type RecurrenceTrigger struct { + // REQUIRED; [Required] The frequency to trigger schedule. + Frequency *RecurrenceFrequency - // The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment. - ExperimentName *string + // REQUIRED; [Required] Specifies schedule interval in conjunction with frequency + Interval *int32 - // Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken - // if null. - Identity IdentityConfigurationClassification + // REQUIRED; [Required] + TriggerType *TriggerType - // Inputs for the pipeline job. - Inputs map[string]JobInputClassification + // Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer https://en.wikipedia.org/wiki/ISO_8601. Recommented + // format would be "2022-06-01T00:00:01" If not present, the schedule will + // run indefinitely + EndTime *string - // Is the asset archived? - IsArchived *bool + // The recurrence schedule. + Schedule *RecurrenceSchedule - // Jobs construct the Pipeline Job. 
- Jobs map[string]any + // Specifies start time of schedule in ISO 8601 format, but without a UTC offset. + StartTime *string - // Outputs for the pipeline job - Outputs map[string]JobOutputClassification + // Specifies time zone in which the schedule runs. TimeZone should follow Windows time zone format. Refer: + // https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11 + TimeZone *string +} - // The asset property dictionary. - Properties map[string]*string +// GetTriggerBase implements the TriggerBaseClassification interface for type RecurrenceTrigger. +func (r *RecurrenceTrigger) GetTriggerBase() *TriggerBase { + return &TriggerBase{ + EndTime: r.EndTime, + StartTime: r.StartTime, + TimeZone: r.TimeZone, + TriggerType: r.TriggerType, + } +} - // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. - Services map[string]*JobService +type RegenerateEndpointKeysRequest struct { + // REQUIRED; [Required] Specification for which type of key to generate. Primary or Secondary. + KeyType *KeyType - // Pipeline settings, for things like ContinueRunOnStepFailure etc. - Settings any + // The value the key is set to. + KeyValue *string +} - // ARM resource ID of source job. - SourceJobID *string +// RegistriesClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistriesClient.BeginCreateOrUpdate +// method. +type RegistriesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Tag dictionary. Tags can be added, removed, and updated. - Tags map[string]*string +// RegistriesClientBeginDeleteOptions contains the optional parameters for the RegistriesClient.BeginDelete method. +type RegistriesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // READ-ONLY; Status of the job. - Status *JobStatus +// RegistriesClientBeginRemoveRegionsOptions contains the optional parameters for the RegistriesClient.BeginRemoveRegions +// method. +type RegistriesClientBeginRemoveRegionsOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// GetJobBaseProperties implements the JobBasePropertiesClassification interface for type PipelineJob. -func (p *PipelineJob) GetJobBaseProperties() *JobBaseProperties { - return &JobBaseProperties{ - ComponentID: p.ComponentID, - ComputeID: p.ComputeID, - DisplayName: p.DisplayName, - ExperimentName: p.ExperimentName, - Identity: p.Identity, - IsArchived: p.IsArchived, - JobType: p.JobType, - Services: p.Services, - Status: p.Status, - Description: p.Description, - Properties: p.Properties, - Tags: p.Tags, - } +// RegistriesClientGetOptions contains the optional parameters for the RegistriesClient.Get method. +type RegistriesClientGetOptions struct { + // placeholder for future optional parameters } -// PrivateEndpoint - The Private Endpoint resource. -type PrivateEndpoint struct { - // READ-ONLY; The ARM identifier for Private Endpoint - ID *string +// RegistriesClientListBySubscriptionOptions contains the optional parameters for the RegistriesClient.NewListBySubscriptionPager +// method. +type RegistriesClientListBySubscriptionOptions struct { + // placeholder for future optional parameters +} - // READ-ONLY; The ARM identifier for Subnet resource that private endpoint links to - SubnetArmID *string +// RegistriesClientListOptions contains the optional parameters for the RegistriesClient.NewListPager method. 
+type RegistriesClientListOptions struct { + // placeholder for future optional parameters } -// PrivateEndpointConnection - The Private Endpoint Connection resource. -type PrivateEndpointConnection struct { - // The identity of the resource. - Identity *ManagedServiceIdentity +// RegistriesClientUpdateOptions contains the optional parameters for the RegistriesClient.Update method. +type RegistriesClientUpdateOptions struct { + // placeholder for future optional parameters +} - // Specifies the location of the resource. +type Registry struct { + // REQUIRED; The geo-location where the resource lives Location *string - // Resource properties. - Properties *PrivateEndpointConnectionProperties + // REQUIRED; [Required] Additional attributes of the entity. + Properties *RegistryProperties + + // Managed service identity (system assigned and/or user assigned identities) + Identity *ManagedServiceIdentity + + // Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type. + Kind *string - // The sku of the workspace. + // Sku details required for ARM contract for Autoscaling. SKU *SKU - // Contains resource tags defined as key/value pairs. + // Resource tags. Tags map[string]*string // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} @@ -6412,267 +10429,456 @@ type PrivateEndpointConnection struct { Type *string } -// PrivateEndpointConnectionListResult - List of private endpoint connection associated with the specified workspace -type PrivateEndpointConnectionListResult struct { - // Array of private endpoint connections - Value []*PrivateEndpointConnection +// RegistryCodeContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryCodeContainersClient.BeginCreateOrUpdate +// method. +type RegistryCodeContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// PrivateEndpointConnectionProperties - Properties of the PrivateEndpointConnectProperties. -type PrivateEndpointConnectionProperties struct { - // REQUIRED; A collection of information about the state of the connection between service consumer and provider. - PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState +// RegistryCodeContainersClientBeginDeleteOptions contains the optional parameters for the RegistryCodeContainersClient.BeginDelete +// method. +type RegistryCodeContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // The resource of private end point. - PrivateEndpoint *PrivateEndpoint +// RegistryCodeContainersClientGetOptions contains the optional parameters for the RegistryCodeContainersClient.Get method. +type RegistryCodeContainersClientGetOptions struct { + // placeholder for future optional parameters +} - // READ-ONLY; The provisioning state of the private endpoint connection resource. - ProvisioningState *PrivateEndpointConnectionProvisioningState +// RegistryCodeContainersClientListOptions contains the optional parameters for the RegistryCodeContainersClient.NewListPager +// method. +type RegistryCodeContainersClientListOptions struct { + // Continuation token for pagination. 
+ Skip *string } -// PrivateEndpointConnectionsClientCreateOrUpdateOptions contains the optional parameters for the PrivateEndpointConnectionsClient.CreateOrUpdate +// RegistryCodeVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryCodeVersionsClient.BeginCreateOrUpdate // method. -type PrivateEndpointConnectionsClientCreateOrUpdateOptions struct { - // placeholder for future optional parameters +type RegistryCodeVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// PrivateEndpointConnectionsClientDeleteOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Delete +// RegistryCodeVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryCodeVersionsClient.BeginDelete // method. -type PrivateEndpointConnectionsClientDeleteOptions struct { - // placeholder for future optional parameters +type RegistryCodeVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// PrivateEndpointConnectionsClientGetOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Get +// RegistryCodeVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the RegistryCodeVersionsClient.CreateOrGetStartPendingUpload // method. -type PrivateEndpointConnectionsClientGetOptions struct { +type RegistryCodeVersionsClientCreateOrGetStartPendingUploadOptions struct { // placeholder for future optional parameters } -// PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.NewListPager -// method. -type PrivateEndpointConnectionsClientListOptions struct { +// RegistryCodeVersionsClientGetOptions contains the optional parameters for the RegistryCodeVersionsClient.Get method. +type RegistryCodeVersionsClientGetOptions struct { // placeholder for future optional parameters } -// PrivateLinkResource - A private link resource -type PrivateLinkResource struct { - // The identity of the resource. - Identity *ManagedServiceIdentity +// RegistryCodeVersionsClientListOptions contains the optional parameters for the RegistryCodeVersionsClient.NewListPager +// method. +type RegistryCodeVersionsClientListOptions struct { + // Ordering of list. + OrderBy *string + // Continuation token for pagination. + Skip *string + // Maximum number of records to return. + Top *int32 +} - // Specifies the location of the resource. - Location *string +// RegistryComponentContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryComponentContainersClient.BeginCreateOrUpdate +// method. +type RegistryComponentContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Resource properties. - Properties *PrivateLinkResourceProperties +// RegistryComponentContainersClientBeginDeleteOptions contains the optional parameters for the RegistryComponentContainersClient.BeginDelete +// method. +type RegistryComponentContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // The sku of the workspace. - SKU *SKU +// RegistryComponentContainersClientGetOptions contains the optional parameters for the RegistryComponentContainersClient.Get +// method. 
+type RegistryComponentContainersClientGetOptions struct { + // placeholder for future optional parameters +} - // Contains resource tags defined as key/value pairs. - Tags map[string]*string +// RegistryComponentContainersClientListOptions contains the optional parameters for the RegistryComponentContainersClient.NewListPager +// method. +type RegistryComponentContainersClientListOptions struct { + // Continuation token for pagination. + Skip *string +} - // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string +// RegistryComponentVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryComponentVersionsClient.BeginCreateOrUpdate +// method. +type RegistryComponentVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // READ-ONLY; The name of the resource - Name *string +// RegistryComponentVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryComponentVersionsClient.BeginDelete +// method. +type RegistryComponentVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData +// RegistryComponentVersionsClientGetOptions contains the optional parameters for the RegistryComponentVersionsClient.Get +// method. +type RegistryComponentVersionsClientGetOptions struct { + // placeholder for future optional parameters +} - // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string +// RegistryComponentVersionsClientListOptions contains the optional parameters for the RegistryComponentVersionsClient.NewListPager +// method. +type RegistryComponentVersionsClientListOptions struct { + // Ordering of list. + OrderBy *string + // Continuation token for pagination. + Skip *string + // Component stage. + Stage *string + // Maximum number of records to return. + Top *int32 } -// PrivateLinkResourceListResult - A list of private link resources -type PrivateLinkResourceListResult struct { - // Array of private link resources - Value []*PrivateLinkResource +// RegistryDataContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryDataContainersClient.BeginCreateOrUpdate +// method. +type RegistryDataContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// PrivateLinkResourceProperties - Properties of a private link resource. -type PrivateLinkResourceProperties struct { - // The private link resource Private link DNS zone name. - RequiredZoneNames []*string +// RegistryDataContainersClientBeginDeleteOptions contains the optional parameters for the RegistryDataContainersClient.BeginDelete +// method. +type RegistryDataContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // READ-ONLY; The private link resource group id. - GroupID *string +// RegistryDataContainersClientGetOptions contains the optional parameters for the RegistryDataContainersClient.Get method. +type RegistryDataContainersClientGetOptions struct { + // placeholder for future optional parameters +} - // READ-ONLY; The private link resource required member names. 
- RequiredMembers []*string +// RegistryDataContainersClientListOptions contains the optional parameters for the RegistryDataContainersClient.NewListPager +// method. +type RegistryDataContainersClientListOptions struct { + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Continuation token for pagination. + Skip *string } -// PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.List method. -type PrivateLinkResourcesClientListOptions struct { +// RegistryDataVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryDataVersionsClient.BeginCreateOrUpdate +// method. +type RegistryDataVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// RegistryDataVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryDataVersionsClient.BeginDelete +// method. +type RegistryDataVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// RegistryDataVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the RegistryDataVersionsClient.CreateOrGetStartPendingUpload +// method. +type RegistryDataVersionsClientCreateOrGetStartPendingUploadOptions struct { // placeholder for future optional parameters } -// PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer -// and provider. -type PrivateLinkServiceConnectionState struct { - // A message indicating if changes on the service provider require any updates on the consumer. - ActionsRequired *string +// RegistryDataVersionsClientGetOptions contains the optional parameters for the RegistryDataVersionsClient.Get method. +type RegistryDataVersionsClientGetOptions struct { + // placeholder for future optional parameters +} - // The reason for approval/rejection of the connection. - Description *string +// RegistryDataVersionsClientListOptions contains the optional parameters for the RegistryDataVersionsClient.NewListPager +// method. +type RegistryDataVersionsClientListOptions struct { + // [ListViewType.ActiveOnly, ListViewType.ArchivedOnly, ListViewType.All]View type for including/excluding (for example) archived + // entities. + ListViewType *ListViewType + // Please choose OrderBy value from ['createdtime', 'modifiedtime'] + OrderBy *string + // Continuation token for pagination. + Skip *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string + // Top count of results, top count cannot be greater than the page size. If topCount > page size, results with be default + // page size count will be returned + Top *int32 +} - // Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. - Status *PrivateEndpointServiceConnectionStatus +// RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryEnvironmentContainersClient.BeginCreateOrUpdate +// method. +type RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// ProbeSettings - Deployment container liveness/readiness probe configuration. -type ProbeSettings struct { - // The number of failures to allow before returning an unhealthy status. 
- FailureThreshold *int32 +// RegistryEnvironmentContainersClientBeginDeleteOptions contains the optional parameters for the RegistryEnvironmentContainersClient.BeginDelete +// method. +type RegistryEnvironmentContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // The delay before the first probe in ISO 8601 format. - InitialDelay *string +// RegistryEnvironmentContainersClientGetOptions contains the optional parameters for the RegistryEnvironmentContainersClient.Get +// method. +type RegistryEnvironmentContainersClientGetOptions struct { + // placeholder for future optional parameters +} - // The length of time between probes in ISO 8601 format. - Period *string +// RegistryEnvironmentContainersClientListOptions contains the optional parameters for the RegistryEnvironmentContainersClient.NewListPager +// method. +type RegistryEnvironmentContainersClientListOptions struct { + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Continuation token for pagination. + Skip *string +} - // The number of successful probes before returning a healthy status. - SuccessThreshold *int32 +// RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.BeginCreateOrUpdate +// method. +type RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // The probe timeout in ISO 8601 format. - Timeout *string +// RegistryEnvironmentVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.BeginDelete +// method. +type RegistryEnvironmentVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// PyTorch distribution configuration. -type PyTorch struct { - // REQUIRED; [Required] Specifies the type of distribution framework. - DistributionType *DistributionType +// RegistryEnvironmentVersionsClientGetOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.Get +// method. +type RegistryEnvironmentVersionsClientGetOptions struct { + // placeholder for future optional parameters +} - // Number of processes per node. - ProcessCountPerInstance *int32 +// RegistryEnvironmentVersionsClientListOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.NewListPager +// method. +type RegistryEnvironmentVersionsClientListOptions struct { + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Ordering of list. + OrderBy *string + // Continuation token for pagination. + Skip *string + // Stage for including/excluding (for example) archived entities. Takes priority over listViewType + Stage *string + // Maximum number of records to return. + Top *int32 } -// GetDistributionConfiguration implements the DistributionConfigurationClassification interface for type PyTorch. -func (p *PyTorch) GetDistributionConfiguration() *DistributionConfiguration { - return &DistributionConfiguration{ - DistributionType: p.DistributionType, - } +type RegistryListCredentialsResult struct { + Passwords []*Password + + // READ-ONLY; The location of the workspace ACR + Location *string + + // READ-ONLY; The username of the workspace ACR + Username *string } -// QuotaBaseProperties - The properties for Quota update or retrieval. 
-type QuotaBaseProperties struct { - // Specifies the resource ID. - ID *string +// RegistryModelContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryModelContainersClient.BeginCreateOrUpdate +// method. +type RegistryModelContainersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // The maximum permitted quota of the resource. - Limit *int64 +// RegistryModelContainersClientBeginDeleteOptions contains the optional parameters for the RegistryModelContainersClient.BeginDelete +// method. +type RegistryModelContainersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} - // Specifies the resource type. - Type *string +// RegistryModelContainersClientGetOptions contains the optional parameters for the RegistryModelContainersClient.Get method. +type RegistryModelContainersClientGetOptions struct { + // placeholder for future optional parameters +} - // An enum describing the unit of quota measurement. - Unit *QuotaUnit +// RegistryModelContainersClientListOptions contains the optional parameters for the RegistryModelContainersClient.NewListPager +// method. +type RegistryModelContainersClientListOptions struct { + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Continuation token for pagination. + Skip *string +} + +// RegistryModelVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryModelVersionsClient.BeginCreateOrUpdate +// method. +type RegistryModelVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// RegistryModelVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryModelVersionsClient.BeginDelete +// method. +type RegistryModelVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// QuotaUpdateParameters - Quota update parameters. -type QuotaUpdateParameters struct { - // Region of workspace quota to be updated. - Location *string - - // The list for update quota. - Value []*QuotaBaseProperties +// RegistryModelVersionsClientBeginPackageOptions contains the optional parameters for the RegistryModelVersionsClient.BeginPackage +// method. +type RegistryModelVersionsClientBeginPackageOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string } -// QuotasClientListOptions contains the optional parameters for the QuotasClient.NewListPager method. -type QuotasClientListOptions struct { +// RegistryModelVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the RegistryModelVersionsClient.CreateOrGetStartPendingUpload +// method. +type RegistryModelVersionsClientCreateOrGetStartPendingUploadOptions struct { // placeholder for future optional parameters } -// QuotasClientUpdateOptions contains the optional parameters for the QuotasClient.Update method. -type QuotasClientUpdateOptions struct { +// RegistryModelVersionsClientGetOptions contains the optional parameters for the RegistryModelVersionsClient.Get method. 
+type RegistryModelVersionsClientGetOptions struct { // placeholder for future optional parameters } -// RandomSamplingAlgorithm - Defines a Sampling Algorithm that generates values randomly -type RandomSamplingAlgorithm struct { - // REQUIRED; [Required] The algorithm used for generating hyperparameter values, along with configuration properties - SamplingAlgorithmType *SamplingAlgorithmType +// RegistryModelVersionsClientListOptions contains the optional parameters for the RegistryModelVersionsClient.NewListPager +// method. +type RegistryModelVersionsClientListOptions struct { + // Model description. + Description *string + // View type for including/excluding (for example) archived entities. + ListViewType *ListViewType + // Ordering of list. + OrderBy *string + // Comma-separated list of property names (and optionally values). Example: prop1,prop2=value2 + Properties *string + // Continuation token for pagination. + Skip *string + // Comma-separated list of tag names (and optionally values). Example: tag1,tag2=value2 + Tags *string + // Maximum number of records to return. + Top *int32 + // Version identifier. + Version *string +} - // The specific type of random algorithm - Rule *RandomSamplingAlgorithmRule +// RegistryPartialManagedServiceIdentity - Managed service identity (system assigned and/or user assigned identities) +type RegistryPartialManagedServiceIdentity struct { + // REQUIRED; Type of managed service identity (where both SystemAssigned and UserAssigned types are allowed). + Type *ManagedServiceIdentityType - // An optional integer to use as the seed for random number generation - Seed *int32 + // The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM + // resource ids in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. + // The dictionary values can be empty objects ({}) in + // requests. + UserAssignedIdentities map[string]*UserAssignedIdentity + + // READ-ONLY; The service principal ID of the system assigned identity. This property will only be provided for a system assigned + // identity. + PrincipalID *string + + // READ-ONLY; The tenant ID of the system assigned identity. This property will only be provided for a system assigned identity. + TenantID *string } -// GetSamplingAlgorithm implements the SamplingAlgorithmClassification interface for type RandomSamplingAlgorithm. -func (r *RandomSamplingAlgorithm) GetSamplingAlgorithm() *SamplingAlgorithm { - return &SamplingAlgorithm{ - SamplingAlgorithmType: r.SamplingAlgorithmType, - } +// RegistryPrivateEndpointConnection - Private endpoint connection definition. +type RegistryPrivateEndpointConnection struct { + // This is the private endpoint connection name created on SRP Full resource id: + // /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName} + ID *string + + // Same as workspace location. + Location *string + + // Properties of the Private Endpoint Connection + Properties *RegistryPrivateEndpointConnectionProperties } -type RecurrenceSchedule struct { - // REQUIRED; [Required] List of hours for the schedule. 
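
To illustrate the `Skip` continuation-token note and the pager-based listing these options feed, here is a hedged sketch of enumerating registry model versions. It is not part of the diff: the client constructor, the `NewListPager` parameter order, and the `Value` field on the page are assumptions based on the SDK's usual generated shape.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Constructor and parameter order assumed; check the generated client for the exact signature.
	client, err := armmachinelearning.NewRegistryModelVersionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	pager := client.NewListPager("<resource-group>", "<registry>", "<model>", &armmachinelearning.RegistryModelVersionsClientListOptions{
		Top:     to.Ptr[int32](50),
		OrderBy: to.Ptr("createdtime"),
	})
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range page.Value { // the pager advances the Skip continuation token internally
			if v.Name != nil {
				log.Println(*v.Name)
			}
		}
	}
}
```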
- Hours []*int32 +// RegistryPrivateEndpointConnectionProperties - Properties of the Private Endpoint Connection +type RegistryPrivateEndpointConnectionProperties struct { + // The group ids + GroupIDs []*string - // REQUIRED; [Required] List of minutes for the schedule. - Minutes []*int32 + // The PE network resource that is linked to this PE connection. + PrivateEndpoint *PrivateEndpointResource - // List of month days for the schedule - MonthDays []*int32 + // One of null, "Succeeded", "Provisioning", "Failed". While not approved, it's null. + ProvisioningState *string - // List of days for the schedule. - WeekDays []*WeekDay + // The connection state. + RegistryPrivateLinkServiceConnectionState *RegistryPrivateLinkServiceConnectionState } -type RecurrenceTrigger struct { - // REQUIRED; [Required] The frequency to trigger schedule. - Frequency *RecurrenceFrequency +// RegistryPrivateLinkServiceConnectionState - The connection state. +type RegistryPrivateLinkServiceConnectionState struct { + // Some RP chose "None". Other RPs use this for region expansion. + ActionsRequired *string - // REQUIRED; [Required] Specifies schedule interval in conjunction with frequency - Interval *int32 + // User-defined message that, per NRP doc, may be used for approval-related message. + Description *string - // REQUIRED; [Required] - TriggerType *TriggerType + // Connection status of the service consumer with the service provider + Status *EndpointServiceConnectionStatus +} - // Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer https://en.wikipedia.org/wiki/ISO_8601. Recommented - // format would be "2022-06-01T00:00:01" If not present, the schedule will - // run indefinitely - EndTime *string +// RegistryProperties - Details of the Registry +type RegistryProperties struct { + // Discovery URL for the Registry + DiscoveryURL *string - // The recurrence schedule. - Schedule *RecurrenceSchedule + // IntellectualPropertyPublisher for the registry + IntellectualPropertyPublisher *string - // Specifies start time of schedule in ISO 8601 format, but without a UTC offset. - StartTime *string + // ResourceId of the managed RG if the registry has system created resources + ManagedResourceGroup *ArmResourceID - // Specifies time zone in which the schedule runs. TimeZone should follow Windows time zone format. Refer: - // https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11 - TimeZone *string -} + // MLFlow Registry URI for the Registry + MlFlowRegistryURI *string -// GetTriggerBase implements the TriggerBaseClassification interface for type RecurrenceTrigger. -func (r *RecurrenceTrigger) GetTriggerBase() *TriggerBase { - return &TriggerBase{ - EndTime: r.EndTime, - StartTime: r.StartTime, - TimeZone: r.TimeZone, - TriggerType: r.TriggerType, - } -} + // Is the Registry accessible from the internet? Possible values: "Enabled" or "Disabled" + PublicNetworkAccess *string -type RegenerateEndpointKeysRequest struct { - // REQUIRED; [Required] Specification for which type of key to generate. Primary or Secondary. - KeyType *KeyType + // Details of each region the registry is in + RegionDetails []*RegistryRegionArmDetails - // The value the key is set to. 
- KeyValue *string + // Private endpoint connections info used for pending connections in private link portal + RegistryPrivateEndpointConnections []*RegistryPrivateEndpointConnection } -type RegistryListCredentialsResult struct { - Passwords []*Password +// RegistryRegionArmDetails - Details for each region the registry is in +type RegistryRegionArmDetails struct { + // List of ACR accounts + AcrDetails []*AcrDetails - // READ-ONLY + // The location where the registry exists Location *string - // READ-ONLY - Username *string + // List of storage accounts + StorageAccountDetails []*StorageAccountDetails +} + +// RegistryTrackedResourceArmPaginatedResult - A paginated list of Registry entities. +type RegistryTrackedResourceArmPaginatedResult struct { + // The link to the next page of Registry objects. If null, there are no additional pages. + NextLink *string + + // An array of objects of type Registry. + Value []*Registry } // Regression task in AutoML Table vertical. @@ -6689,6 +10895,9 @@ type Regression struct { // Featurization inputs needed for AutoML job. FeaturizationSettings *TableVerticalFeaturizationSettings + // Model/training parameters that will remain constant throughout training. + FixedParameters *TableFixedParameters + // Execution constraints for AutoMLJob. LimitSettings *TableVerticalLimitSettings @@ -6701,6 +10910,12 @@ type Regression struct { // Primary metric for regression task. PrimaryMetric *RegressionPrimaryMetrics + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*TableParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *TableSweepSettings + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. TargetColumnName *string @@ -6736,6 +10951,26 @@ func (r *Regression) GetAutoMLVertical() *AutoMLVertical { } } +type RegressionModelPerformanceMetricThreshold struct { + // REQUIRED; [Required] The regression model performance metric to calculate. + Metric *RegressionModelPerformanceMetric + + // REQUIRED; [Required] Specifies the data type of the metric threshold. + ModelType *MonitoringModelType + + // The threshold value. If null, a default value will be set depending on the selected metric. + Threshold *MonitoringThreshold +} + +// GetModelPerformanceMetricThresholdBase implements the ModelPerformanceMetricThresholdBaseClassification interface for type +// RegressionModelPerformanceMetricThreshold. +func (r *RegressionModelPerformanceMetricThreshold) GetModelPerformanceMetricThresholdBase() *ModelPerformanceMetricThresholdBase { + return &ModelPerformanceMetricThresholdBase{ + ModelType: r.ModelType, + Threshold: r.Threshold, + } +} + // RegressionTrainingSettings - Regression Training related configuration. type RegressionTrainingSettings struct { // Allowed models for regression task. @@ -6766,6 +11001,19 @@ type RegressionTrainingSettings struct { // Stack ensemble settings for stack ensemble run. StackEnsembleSettings *StackEnsembleSettings + + // TrainingMode mode - Setting to 'auto' is same as setting it to 'non-distributed' for now, however in the future may result + // in mixed mode or heuristics based mode selection. Default is 'auto'. If + // 'Distributed' then only distributed featurization is used and distributed algorithms are chosen. If 'NonDistributed' then + // only non distributed algorithms are chosen. 
+ TrainingMode *TrainingMode +} + +type RequestLogging struct { + // For payload logging, we only collect payload by default. If customers also want to collect the specified headers, they + // can set them in captureHeaders so that backend will collect those headers along + // with payload. + CaptureHeaders []*string } // Resource - Common fields that are returned in the response for all Azure Resource Manager resources @@ -6801,6 +11049,13 @@ type ResourceConfiguration struct { // Optional type of VM used as supported by the compute target. InstanceType *string + // Locations where the job can run. + Locations []*string + + // Optional max allowed number of instances or nodes to be used by the compute target. For use with elastic training, currently + // supported by PyTorch distribution type only. + MaxInstanceCount *int32 + // Additional properties bag. Properties map[string]any } @@ -6856,23 +11111,36 @@ type SASAuthTypeWorkspaceConnectionProperties struct { // Category of the connection Category *ConnectionCategory Credentials *WorkspaceConnectionSharedAccessSignature - Target *string - - // Value details of the workspace connection. - Value *string + ExpiryTime *time.Time - // format for the workspace connection value - ValueFormat *ValueFormat + // Anything + Metadata any + Target *string } // GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type SASAuthTypeWorkspaceConnectionProperties. func (s *SASAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { return &WorkspaceConnectionPropertiesV2{ - AuthType: s.AuthType, - Category: s.Category, - Target: s.Target, - Value: s.Value, - ValueFormat: s.ValueFormat, + AuthType: s.AuthType, + Category: s.Category, + ExpiryTime: s.ExpiryTime, + Metadata: s.Metadata, + Target: s.Target, + } +} + +type SASCredentialDto struct { + // REQUIRED; [Required] Credential type used to authentication with storage. + CredentialType *PendingUploadCredentialType + + // Full SAS Uri, including the storage, container/blob path and SAS token + SasURI *string +} + +// GetPendingUploadCredentialDto implements the PendingUploadCredentialDtoClassification interface for type SASCredentialDto. +func (s *SASCredentialDto) GetPendingUploadCredentialDto() *PendingUploadCredentialDto { + return &PendingUploadCredentialDto{ + CredentialType: s.CredentialType, } } @@ -7053,7 +11321,7 @@ type Schedule struct { // ScheduleActionBaseClassification provides polymorphic access to related types. // Call the interface's GetScheduleActionBase() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: -// - *EndpointScheduleAction, *JobScheduleAction, *ScheduleActionBase +// - *CreateMonitorAction, *EndpointScheduleAction, *ImportDataAction, *JobScheduleAction, *ScheduleActionBase type ScheduleActionBaseClassification interface { // GetScheduleActionBase returns the ScheduleActionBase content of the underlying type. GetScheduleActionBase() *ScheduleActionBase @@ -7148,7 +11416,7 @@ type ScriptReference struct { // The location of scripts in the mounted volume. ScriptData *string - // The storage source of the script: workspace. + // The storage source of the script: inline, workspace. ScriptSource *string // Optional time period passed to timeout command. @@ -7182,11 +11450,44 @@ type Seasonality struct { // GetSeasonality implements the SeasonalityClassification interface for type Seasonality. 
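
The new `Locations` and `MaxInstanceCount` fields on `ResourceConfiguration` allow multi-region placement and elastic scale-out (per the comment, currently for the PyTorch distribution type). A small sketch, not part of the diff; the VM size and regions are purely illustrative.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

// elasticResources sketches a ResourceConfiguration that lets a job run in either
// region and scale up to four nodes for elastic PyTorch training.
func elasticResources() armmachinelearning.ResourceConfiguration {
	return armmachinelearning.ResourceConfiguration{
		InstanceType:     to.Ptr("Standard_NC6s_v3"), // illustrative VM size
		Locations:        []*string{to.Ptr("eastus"), to.Ptr("westus2")},
		MaxInstanceCount: to.Ptr[int32](4),
	}
}
```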
func (s *Seasonality) GetSeasonality() *Seasonality { return s } +// SecretConfiguration - Secret Configuration definition. +type SecretConfiguration struct { + // Secret Uri. Sample Uri : https://myvault.vault.azure.net/secrets/mysecretname/secretversion + URI *string + + // Name of secret in workspace key vault. + WorkspaceSecretName *string +} + type ServiceManagedResourcesSettings struct { - // The settings for the service managed cosmosdb account. CosmosDb *CosmosDbSettings } +type ServicePrincipalAuthTypeWorkspaceConnectionProperties struct { + // REQUIRED; Authentication type of the connection target + AuthType *ConnectionAuthType + + // Category of the connection + Category *ConnectionCategory + Credentials *WorkspaceConnectionServicePrincipal + ExpiryTime *time.Time + + // Anything + Metadata any + Target *string +} + +// GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type ServicePrincipalAuthTypeWorkspaceConnectionProperties. +func (s *ServicePrincipalAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { + return &WorkspaceConnectionPropertiesV2{ + AuthType: s.AuthType, + Category: s.Category, + ExpiryTime: s.ExpiryTime, + Metadata: s.Metadata, + Target: s.Target, + } +} + // ServicePrincipalDatastoreCredentials - Service Principal datastore credentials configuration. type ServicePrincipalDatastoreCredentials struct { // REQUIRED; [Required] Service principal client ID. @@ -7231,6 +11532,43 @@ func (s *ServicePrincipalDatastoreSecrets) GetDatastoreSecrets() *DatastoreSecre } } +// ServiceTagDestination - Service Tag destination for a Service Tag Outbound Rule for the managed network of a machine learning +// workspace. +type ServiceTagDestination struct { + // The action enum for networking rule. + Action *RuleAction + PortRanges *string + Protocol *string + ServiceTag *string + + // READ-ONLY; Optional, if provided, the ServiceTag property will be ignored. + AddressPrefixes []*string +} + +// ServiceTagOutboundRule - Service Tag Outbound Rule for the managed network of a machine learning workspace. +type ServiceTagOutboundRule struct { + // REQUIRED; Type of a managed network Outbound Rule of a machine learning workspace. + Type *RuleType + + // Category of a managed network Outbound Rule of a machine learning workspace. + Category *RuleCategory + + // Service Tag destination for a Service Tag Outbound Rule for the managed network of a machine learning workspace. + Destination *ServiceTagDestination + + // Type of a managed network Outbound Rule of a machine learning workspace. + Status *RuleStatus +} + +// GetOutboundRule implements the OutboundRuleClassification interface for type ServiceTagOutboundRule. +func (s *ServiceTagOutboundRule) GetOutboundRule() *OutboundRule { + return &OutboundRule{ + Category: s.Category, + Status: s.Status, + Type: s.Type, + } +} + // SetupScripts - Details of customized scripts to execute for setting up the cluster. type SetupScripts struct { // Customized setup scripts @@ -7238,26 +11576,187 @@ type SetupScripts struct { } type SharedPrivateLinkResource struct { - // Unique name of the private link. + // Unique name of the private link Name *string - // Resource properties. + // Properties of a shared private link resource. Properties *SharedPrivateLinkResourceProperty } // SharedPrivateLinkResourceProperty - Properties of a shared private link resource. 
type SharedPrivateLinkResourceProperty struct { - // The private link resource group id. + // group id of the private link GroupID *string - // The resource id that private link links to. - PrivateLinkResourceID *string + // the resource id that private link links to + PrivateLinkResourceID *string + + // Request message + RequestMessage *string + + // Connection status of the service consumer with the service provider + Status *EndpointServiceConnectionStatus +} + +// SparkJob - Spark job definition. +type SparkJob struct { + // REQUIRED; [Required] ARM resource ID of the code asset. + CodeID *string + + // REQUIRED; [Required] The entry to execute on startup of the job. + Entry SparkJobEntryClassification + + // REQUIRED; [Required] Specifies the type of job. + JobType *JobType + + // Archive files used in the job. + Archives []*string + + // Arguments for the job. + Args *string + + // ARM resource ID of the component resource. + ComponentID *string + + // ARM resource ID of the compute resource. + ComputeID *string + + // Spark configured properties. + Conf map[string]*string + + // The asset description text. + Description *string + + // Display name of job. + DisplayName *string + + // The ARM resource ID of the Environment specification for the job. + EnvironmentID *string + + // The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment. + ExperimentName *string + + // Files used in the job. + Files []*string + + // Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken + // if null. + Identity IdentityConfigurationClassification + + // Mapping of input data bindings used in the job. + Inputs map[string]JobInputClassification + + // Is the asset archived? + IsArchived *bool + + // Jar files used in the job. + Jars []*string + + // Notification setting for the job + NotificationSetting *NotificationSetting + + // Mapping of output data bindings used in the job. + Outputs map[string]JobOutputClassification + + // The asset property dictionary. + Properties map[string]*string + + // Python files used in the job. + PyFiles []*string + + // Queue settings for the job + QueueSettings *QueueSettings + + // Compute Resource configuration for the job. + Resources *SparkResourceConfiguration + + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration + + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. + Services map[string]*JobService + + // Tag dictionary. Tags can be added, removed, and updated. + Tags map[string]*string + + // READ-ONLY; Status of the job. + Status *JobStatus +} + +// GetJobBaseProperties implements the JobBasePropertiesClassification interface for type SparkJob. +func (s *SparkJob) GetJobBaseProperties() *JobBaseProperties { + return &JobBaseProperties{ + ComponentID: s.ComponentID, + ComputeID: s.ComputeID, + DisplayName: s.DisplayName, + ExperimentName: s.ExperimentName, + Identity: s.Identity, + IsArchived: s.IsArchived, + JobType: s.JobType, + NotificationSetting: s.NotificationSetting, + SecretsConfiguration: s.SecretsConfiguration, + Services: s.Services, + Status: s.Status, + Description: s.Description, + Properties: s.Properties, + Tags: s.Tags, + } +} + +// SparkJobEntryClassification provides polymorphic access to related types. +// Call the interface's GetSparkJobEntry() method to access the common type. 
+// Use a type switch to determine the concrete type. The possible types are: +// - *SparkJobEntry, *SparkJobPythonEntry, *SparkJobScalaEntry +type SparkJobEntryClassification interface { + // GetSparkJobEntry returns the SparkJobEntry content of the underlying type. + GetSparkJobEntry() *SparkJobEntry +} + +// SparkJobEntry - Spark job entry point definition. +type SparkJobEntry struct { + // REQUIRED; [Required] Type of the job's entry point. + SparkJobEntryType *SparkJobEntryType +} + +// GetSparkJobEntry implements the SparkJobEntryClassification interface for type SparkJobEntry. +func (s *SparkJobEntry) GetSparkJobEntry() *SparkJobEntry { return s } + +type SparkJobPythonEntry struct { + // REQUIRED; [Required] Relative python file path for job entry point. + File *string + + // REQUIRED; [Required] Type of the job's entry point. + SparkJobEntryType *SparkJobEntryType +} + +// GetSparkJobEntry implements the SparkJobEntryClassification interface for type SparkJobPythonEntry. +func (s *SparkJobPythonEntry) GetSparkJobEntry() *SparkJobEntry { + return &SparkJobEntry{ + SparkJobEntryType: s.SparkJobEntryType, + } +} + +type SparkJobScalaEntry struct { + // REQUIRED; [Required] Scala class name used as entry point. + ClassName *string - // Request message. - RequestMessage *string + // REQUIRED; [Required] Type of the job's entry point. + SparkJobEntryType *SparkJobEntryType +} + +// GetSparkJobEntry implements the SparkJobEntryClassification interface for type SparkJobScalaEntry. +func (s *SparkJobScalaEntry) GetSparkJobEntry() *SparkJobEntry { + return &SparkJobEntry{ + SparkJobEntryType: s.SparkJobEntryType, + } +} + +type SparkResourceConfiguration struct { + // Optional type of VM used as supported by the compute target. + InstanceType *string - // Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. - Status *PrivateEndpointServiceConnectionStatus + // Version of spark runtime used for the job. + RuntimeVersion *string } // StackEnsembleSettings - Advances setting to customize StackEnsemble run. @@ -7273,6 +11772,68 @@ type StackEnsembleSettings struct { StackMetaLearnerType *StackMetaLearnerType } +// StaticInputData - Static input data definition. +type StaticInputData struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + InputDataType *MonitoringInputDataType + + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // REQUIRED; [Required] Input Asset URI. + URI *string + + // REQUIRED; [Required] The end date of the data window. + WindowEnd *time.Time + + // REQUIRED; [Required] The start date of the data window. + WindowStart *time.Time + + // Mapping of column names to special uses. + Columns map[string]*string + + // The context metadata of the data source. + DataContext *string + + // The ARM resource ID of the component resource used to preprocess the data. + PreprocessingComponentID *string +} + +// GetMonitoringInputDataBase implements the MonitoringInputDataBaseClassification interface for type StaticInputData. +func (s *StaticInputData) GetMonitoringInputDataBase() *MonitoringInputDataBase { + return &MonitoringInputDataBase{ + Columns: s.Columns, + DataContext: s.DataContext, + InputDataType: s.InputDataType, + JobInputType: s.JobInputType, + URI: s.URI, + } +} + +// StatusMessage - Active message associated with project +type StatusMessage struct { + // READ-ONLY; Service-defined message code. + Code *string + + // READ-ONLY; Time in UTC at which the message was created. 
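
A hedged sketch (not part of the diff) of assembling the new `SparkJob` with a Python entry point and Spark resource configuration. The `SparkJobEntryTypeSparkJobPythonEntry` constant name and the ARM IDs are assumptions, and required plumbing such as compute, inputs, and outputs is omitted.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

// sparkJob sketches the new SparkJob shape; only fields shown in this diff are set.
func sparkJob() armmachinelearning.SparkJob {
	return armmachinelearning.SparkJob{
		JobType: to.Ptr(armmachinelearning.JobTypeSpark),
		CodeID:  to.Ptr("<code-asset-arm-id>"),
		Entry: &armmachinelearning.SparkJobPythonEntry{
			SparkJobEntryType: to.Ptr(armmachinelearning.SparkJobEntryTypeSparkJobPythonEntry), // constant name assumed
			File:              to.Ptr("train.py"),
		},
		Conf: map[string]*string{
			"spark.driver.cores":   to.Ptr("1"),
			"spark.executor.cores": to.Ptr("2"),
		},
		PyFiles: []*string{to.Ptr("utils.zip")},
		Resources: &armmachinelearning.SparkResourceConfiguration{
			InstanceType:   to.Ptr("standard_e4s_v3"),
			RuntimeVersion: to.Ptr("3.2"),
		},
	}
}
```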
+ CreatedDateTime *time.Time + + // READ-ONLY; Severity level of message. + Level *StatusMessageLevel + + // READ-ONLY; A human-readable representation of the message code. + Message *string +} + +// StorageAccountDetails - Details of storage account to be used for the Registry +type StorageAccountDetails struct { + // Details of system created storage account to be used for the registry + SystemCreatedStorageAccount *SystemCreatedStorageAccount + + // Details of user created storage account to be used for the registry + UserCreatedStorageAccount *UserCreatedStorageAccount +} + // SweepJob - Sweep job definition. type SweepJob struct { // REQUIRED; [Required] Specifies the type of job. @@ -7322,12 +11883,21 @@ type SweepJob struct { // Sweep Job limit. Limits *SweepJobLimits + // Notification setting for the job + NotificationSetting *NotificationSetting + // Mapping of output data bindings used in the job. Outputs map[string]JobOutputClassification // The asset property dictionary. Properties map[string]*string + // Queue settings for the job + QueueSettings *QueueSettings + + // Configuration for secrets to be made available during runtime. + SecretsConfiguration map[string]*SecretConfiguration + // List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject. Services map[string]*JobService @@ -7341,18 +11911,20 @@ type SweepJob struct { // GetJobBaseProperties implements the JobBasePropertiesClassification interface for type SweepJob. func (s *SweepJob) GetJobBaseProperties() *JobBaseProperties { return &JobBaseProperties{ - ComponentID: s.ComponentID, - ComputeID: s.ComputeID, - DisplayName: s.DisplayName, - ExperimentName: s.ExperimentName, - Identity: s.Identity, - IsArchived: s.IsArchived, - JobType: s.JobType, - Services: s.Services, - Status: s.Status, - Description: s.Description, - Properties: s.Properties, - Tags: s.Tags, + ComponentID: s.ComponentID, + ComputeID: s.ComputeID, + DisplayName: s.DisplayName, + ExperimentName: s.ExperimentName, + Identity: s.Identity, + IsArchived: s.IsArchived, + JobType: s.JobType, + NotificationSetting: s.NotificationSetting, + SecretsConfiguration: s.SecretsConfiguration, + Services: s.Services, + Status: s.Status, + Description: s.Description, + Properties: s.Properties, + Tags: s.Tags, } } @@ -7466,6 +12038,35 @@ type SynapseSparkProperties struct { WorkspaceName *string } +type SystemCreatedAcrAccount struct { + // Name of the ACR account + AcrAccountName *string + + // SKU of the ACR account + AcrAccountSKU *string + + // This is populated once the ACR account is created. + ArmResourceID *ArmResourceID +} + +type SystemCreatedStorageAccount struct { + // Public blob access allowed + AllowBlobPublicAccess *bool + + // This is populated once the storage account is created. + ArmResourceID *ArmResourceID + + // HNS enabled for storage account + StorageAccountHnsEnabled *bool + + // Name of the storage account + StorageAccountName *string + + // Allowed values: "StandardLRS", "StandardGRS", "StandardRAGRS", "StandardZRS", "StandardGZRS", "StandardRAGZRS", "PremiumLRS", + // "PremiumZRS" + StorageAccountType *string +} + // SystemData - Metadata pertaining to creation and last modification of the resource. type SystemData struct { // The timestamp of resource creation (UTC). @@ -7499,6 +12100,139 @@ type SystemService struct { Version *string } +// TableFixedParameters - Fixed training parameters that won't be swept over during AutoML Table training. 
+type TableFixedParameters struct { + // Specify the boosting type, e.g gbdt for XGBoost. + Booster *string + + // Specify the boosting type, e.g gbdt for LightGBM. + BoostingType *string + + // Specify the grow policy, which controls the way new nodes are added to the tree. + GrowPolicy *string + + // The learning rate for the training procedure. + LearningRate *float64 + + // Specify the Maximum number of discrete bins to bucket continuous features . + MaxBin *int32 + + // Specify the max depth to limit the tree depth explicitly. + MaxDepth *int32 + + // Specify the max leaves to limit the tree leaves explicitly. + MaxLeaves *int32 + + // The minimum number of data per leaf. + MinDataInLeaf *int32 + + // Minimum loss reduction required to make a further partition on a leaf node of the tree. + MinSplitGain *float64 + + // The name of the model to train. + ModelName *string + + // Specify the number of trees (or rounds) in an model. + NEstimators *int32 + + // Specify the number of leaves. + NumLeaves *int32 + + // The name of the preprocessor to use. + PreprocessorName *string + + // L1 regularization term on weights. + RegAlpha *float64 + + // L2 regularization term on weights. + RegLambda *float64 + + // Subsample ratio of the training instance. + Subsample *float64 + + // Frequency of subsample. + SubsampleFreq *float64 + + // Specify the tree method. + TreeMethod *string + + // If true, center before scaling the data with StandardScalar. + WithMean *bool + + // If true, scaling the data with Unit Variance with StandardScalar. + WithStd *bool +} + +type TableParameterSubspace struct { + // Specify the boosting type, e.g gbdt for XGBoost. + Booster *string + + // Specify the boosting type, e.g gbdt for LightGBM. + BoostingType *string + + // Specify the grow policy, which controls the way new nodes are added to the tree. + GrowPolicy *string + + // The learning rate for the training procedure. + LearningRate *string + + // Specify the Maximum number of discrete bins to bucket continuous features . + MaxBin *string + + // Specify the max depth to limit the tree depth explicitly. + MaxDepth *string + + // Specify the max leaves to limit the tree leaves explicitly. + MaxLeaves *string + + // The minimum number of data per leaf. + MinDataInLeaf *string + + // Minimum loss reduction required to make a further partition on a leaf node of the tree. + MinSplitGain *string + + // The name of the model to train. + ModelName *string + + // Specify the number of trees (or rounds) in an model. + NEstimators *string + + // Specify the number of leaves. + NumLeaves *string + + // The name of the preprocessor to use. + PreprocessorName *string + + // L1 regularization term on weights. + RegAlpha *string + + // L2 regularization term on weights. + RegLambda *string + + // Subsample ratio of the training instance. + Subsample *string + + // Frequency of subsample + SubsampleFreq *string + + // Specify the tree method. + TreeMethod *string + + // If true, center before scaling the data with StandardScalar. + WithMean *string + + // If true, scaling the data with Unit Variance with StandardScalar. + WithStd *string +} + +type TableSweepSettings struct { + // REQUIRED; [Required] Type of sampling algorithm. + SamplingAlgorithm *SamplingAlgorithmType + + // Type of early termination policy for the sweeping job. + EarlyTermination EarlyTerminationPolicyClassification +} + // TableVertical - Abstract class for AutoML tasks that use table dataset as input - such as Classification/Regression/Forecasting. 
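
These table training parameters plug into the AutoML table verticals via the `FixedParameters`, `SearchSpace`, and `SweepSettings` fields added above. The sketch below is illustrative only: the `SamplingAlgorithmTypeRandom` constant and the search-space expression syntax are assumptions, and required vertical fields such as the training data input and task type are omitted.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

// regressionWithSweep sketches how fixed parameters, a search space, and sweep settings fit together.
func regressionWithSweep() armmachinelearning.Regression {
	return armmachinelearning.Regression{
		TargetColumnName: to.Ptr("price"),
		// Parameters held constant across all trials.
		FixedParameters: &armmachinelearning.TableFixedParameters{
			Booster:     to.Ptr("gbtree"),
			MaxDepth:    to.Ptr[int32](6),
			NEstimators: to.Ptr[int32](200),
		},
		// Each subspace describes, as strings, the ranges to sample from.
		SearchSpace: []*armmachinelearning.TableParameterSubspace{
			{
				ModelName:    to.Ptr("LightGBM"),
				LearningRate: to.Ptr("uniform(0.01, 0.1)"), // expression syntax assumed
				NumLeaves:    to.Ptr("choice(31, 63, 127)"),
			},
		},
		SweepSettings: &armmachinelearning.TableSweepSettings{
			SamplingAlgorithm: to.Ptr(armmachinelearning.SamplingAlgorithmTypeRandom), // constant name assumed
		},
	}
}
```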
type TableVertical struct { // Columns to use for CVSplit data. @@ -7507,12 +12241,21 @@ type TableVertical struct { // Featurization inputs needed for AutoML job. FeaturizationSettings *TableVerticalFeaturizationSettings + // Model/training parameters that will remain constant throughout training. + FixedParameters *TableFixedParameters + // Execution constraints for AutoMLJob. LimitSettings *TableVerticalLimitSettings // Number of cross validation folds to be applied on training dataset when validation dataset is not provided. NCrossValidations NCrossValidationsClassification + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*TableParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *TableSweepSettings + // Test data input. TestData *MLTableJobInput @@ -7571,9 +12314,18 @@ type TableVerticalLimitSettings struct { // Max cores per iteration. MaxCoresPerTrial *int32 + // Maximum nodes to use for the experiment. + MaxNodes *int32 + // Number of iterations. MaxTrials *int32 + // Number of concurrent sweeping runs that user wants to trigger. + SweepConcurrentTrials *int32 + + // Number of sweeping runs that user wants to trigger. + SweepTrials *int32 + // AutoML job timeout. Timeout *string @@ -7671,6 +12423,9 @@ type TextClassification struct { // Featurization inputs needed for AutoML job. FeaturizationSettings *NlpVerticalFeaturizationSettings + // Model/training parameters that will remain constant throughout training. + FixedParameters *NlpFixedParameters + // Execution constraints for AutoMLJob. LimitSettings *NlpVerticalLimitSettings @@ -7680,6 +12435,12 @@ type TextClassification struct { // Primary metric for Text-Classification task. PrimaryMetric *ClassificationPrimaryMetrics + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*NlpParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *NlpSweepSettings + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. TargetColumnName *string @@ -7708,12 +12469,21 @@ type TextClassificationMultilabel struct { // Featurization inputs needed for AutoML job. FeaturizationSettings *NlpVerticalFeaturizationSettings + // Model/training parameters that will remain constant throughout training. + FixedParameters *NlpFixedParameters + // Execution constraints for AutoMLJob. LimitSettings *NlpVerticalLimitSettings // Log verbosity for the job. LogVerbosity *LogVerbosity + // Search space for sampling different combinations of models and their hyperparameters. + SearchSpace []*NlpParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *NlpSweepSettings + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. TargetColumnName *string @@ -7746,12 +12516,21 @@ type TextNer struct { // Featurization inputs needed for AutoML job. FeaturizationSettings *NlpVerticalFeaturizationSettings + // Model/training parameters that will remain constant throughout training. + FixedParameters *NlpFixedParameters + // Execution constraints for AutoMLJob. LimitSettings *NlpVerticalLimitSettings // Log verbosity for the job. LogVerbosity *LogVerbosity + // Search space for sampling different combinations of models and their hyperparameters. 
+ SearchSpace []*NlpParameterSubspace + + // Settings for model sweeping and hyperparameter tuning. + SweepSettings *NlpSweepSettings + // Target column name: This is prediction values column. Also known as label column name in context of classification tasks. TargetColumnName *string @@ -7772,6 +12551,26 @@ func (t *TextNer) GetAutoMLVertical() *AutoMLVertical { } } +type TmpfsOptions struct { + // Mention the Tmpfs size + Size *int32 +} + +type TopNFeaturesByAttribution struct { + // REQUIRED; [Required] Specifies the feature filter to leverage when selecting features to calculate metrics over. + FilterType *MonitoringFeatureFilterType + + // The number of top features to include. + Top *int32 +} + +// GetMonitoringFeatureFilterBase implements the MonitoringFeatureFilterBaseClassification interface for type TopNFeaturesByAttribution. +func (t *TopNFeaturesByAttribution) GetMonitoringFeatureFilterBase() *MonitoringFeatureFilterBase { + return &MonitoringFeatureFilterBase{ + FilterType: t.FilterType, + } +} + // TrackedResource - The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' // and a 'location' type TrackedResource struct { @@ -7794,6 +12593,44 @@ type TrackedResource struct { Type *string } +// TrailingInputData - Trailing input data definition. +type TrailingInputData struct { + // REQUIRED; [Required] Specifies the type of signal to monitor. + InputDataType *MonitoringInputDataType + + // REQUIRED; [Required] Specifies the type of job. + JobInputType *JobInputType + + // REQUIRED; [Required] Input Asset URI. + URI *string + + // REQUIRED; [Required] The time offset between the end of the data window and the monitor's current run time. + WindowOffset *string + + // REQUIRED; [Required] The size of the trailing data window. + WindowSize *string + + // Mapping of column names to special uses. + Columns map[string]*string + + // The context metadata of the data source. + DataContext *string + + // The ARM resource ID of the component resource used to preprocess the data. + PreprocessingComponentID *string +} + +// GetMonitoringInputDataBase implements the MonitoringInputDataBaseClassification interface for type TrailingInputData. +func (t *TrailingInputData) GetMonitoringInputDataBase() *MonitoringInputDataBase { + return &MonitoringInputDataBase{ + Columns: t.Columns, + DataContext: t.DataContext, + InputDataType: t.InputDataType, + JobInputType: t.JobInputType, + URI: t.URI, + } +} + // TrainingSettings - Training related configuration. type TrainingSettings struct { // Enable recommendation of DNN models. @@ -7818,6 +12655,12 @@ type TrainingSettings struct { // Stack ensemble settings for stack ensemble run. StackEnsembleSettings *StackEnsembleSettings + + // TrainingMode mode - Setting to 'auto' is same as setting it to 'non-distributed' for now, however in the future may result + // in mixed mode or heuristics based mode selection. Default is 'auto'. If + // 'Distributed' then only distributed featurization is used and distributed algorithms are chosen. If 'NonDistributed' then + // only non distributed algorithms are chosen. + TrainingMode *TrainingMode } // TrialComponent - Trial component definition. @@ -7870,6 +12713,22 @@ type TriggerBase struct { // GetTriggerBase implements the TriggerBaseClassification interface for type TriggerBase. func (t *TriggerBase) GetTriggerBase() *TriggerBase { return t } +// TritonInferencingServer - Triton inferencing server configurations. 
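
A hedged sketch (not part of the diff) of a `TrailingInputData` monitoring input covering the seven days ending at each monitor run. The `MonitoringInputDataTypeTrailing` and `JobInputTypeMltable` constant names and the datastore URI are assumptions.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

// trailingWindow sketches a trailing data window of seven days ending at the monitor's run time.
func trailingWindow() armmachinelearning.TrailingInputData {
	return armmachinelearning.TrailingInputData{
		InputDataType: to.Ptr(armmachinelearning.MonitoringInputDataTypeTrailing), // constant name assumed
		JobInputType:  to.Ptr(armmachinelearning.JobInputTypeMltable),             // constant name assumed
		URI:           to.Ptr("azureml://datastores/workspaceblobstore/paths/production/"),
		WindowSize:    to.Ptr("P7D"), // ISO 8601 duration: seven days
		WindowOffset:  to.Ptr("P0D"), // no offset from the monitor's current run time
		DataContext:   to.Ptr("model_inputs"),
	}
}
```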
+type TritonInferencingServer struct { + // REQUIRED; [Required] Inferencing server type for various targets. + ServerType *InferencingServerType + + // Inference configuration for Triton. + InferenceConfiguration *OnlineInferenceConfiguration +} + +// GetInferencingServer implements the InferencingServerClassification interface for type TritonInferencingServer. +func (t *TritonInferencingServer) GetInferencingServer() *InferencingServer { + return &InferencingServer{ + ServerType: t.ServerType, + } +} + type TritonModelJobInput struct { // REQUIRED; [Required] Specifies the type of job. JobInputType *JobInputType @@ -7896,6 +12755,15 @@ type TritonModelJobOutput struct { // REQUIRED; [Required] Specifies the type of job. JobOutputType *JobOutputType + // Output Asset Name. + AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + // Description for the output. Description *string @@ -7944,21 +12812,31 @@ type URIFileDataVersion struct { // REQUIRED; [Required] Specifies the type of data. DataType *DataType - // REQUIRED; [Required] Uri of the data. Usage/meaning depends on Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20221001.Assets.DataVersionBase.DataType + // REQUIRED; [Required] Uri of the data. Example: https://go.microsoft.com/fwlink/?linkid=2202330 DataURI *string + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // The asset description text. Description *string - // If the name version are system generated (anonymous registration). + // Intellectual Property details. Used if data is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The asset property dictionary. Properties map[string]*string + // Stage in the data lifecycle assigned to this data asset + Stage *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string } @@ -7966,13 +12844,16 @@ type URIFileDataVersion struct { // GetDataVersionBaseProperties implements the DataVersionBasePropertiesClassification interface for type URIFileDataVersion. func (u *URIFileDataVersion) GetDataVersionBaseProperties() *DataVersionBaseProperties { return &DataVersionBaseProperties{ - DataType: u.DataType, - DataURI: u.DataURI, - IsAnonymous: u.IsAnonymous, - IsArchived: u.IsArchived, - Description: u.Description, - Properties: u.Properties, - Tags: u.Tags, + DataType: u.DataType, + DataURI: u.DataURI, + IntellectualProperty: u.IntellectualProperty, + Stage: u.Stage, + AutoDeleteSetting: u.AutoDeleteSetting, + IsAnonymous: u.IsAnonymous, + IsArchived: u.IsArchived, + Description: u.Description, + Properties: u.Properties, + Tags: u.Tags, } } @@ -8002,6 +12883,15 @@ type URIFileJobOutput struct { // REQUIRED; [Required] Specifies the type of job. JobOutputType *JobOutputType + // Output Asset Name. + AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + // Description for the output. 
Description *string @@ -8025,21 +12915,31 @@ type URIFolderDataVersion struct { // REQUIRED; [Required] Specifies the type of data. DataType *DataType - // REQUIRED; [Required] Uri of the data. Usage/meaning depends on Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20221001.Assets.DataVersionBase.DataType + // REQUIRED; [Required] Uri of the data. Example: https://go.microsoft.com/fwlink/?linkid=2202330 DataURI *string + // Specifies the lifecycle setting of managed data asset. + AutoDeleteSetting *AutoDeleteSetting + // The asset description text. Description *string - // If the name version are system generated (anonymous registration). + // Intellectual Property details. Used if data is an Intellectual Property. + IntellectualProperty *IntellectualProperty + + // If the name version are system generated (anonymous registration). For types where Stage is defined, when Stage is provided + // it will be used to populate IsAnonymous IsAnonymous *bool - // Is the asset archived? + // Is the asset archived? For types where Stage is defined, when Stage is provided it will be used to populate IsArchived IsArchived *bool // The asset property dictionary. Properties map[string]*string + // Stage in the data lifecycle assigned to this data asset + Stage *string + // Tag dictionary. Tags can be added, removed, and updated. Tags map[string]*string } @@ -8047,13 +12947,16 @@ type URIFolderDataVersion struct { // GetDataVersionBaseProperties implements the DataVersionBasePropertiesClassification interface for type URIFolderDataVersion. func (u *URIFolderDataVersion) GetDataVersionBaseProperties() *DataVersionBaseProperties { return &DataVersionBaseProperties{ - DataType: u.DataType, - DataURI: u.DataURI, - IsAnonymous: u.IsAnonymous, - IsArchived: u.IsArchived, - Description: u.Description, - Properties: u.Properties, - Tags: u.Tags, + DataType: u.DataType, + DataURI: u.DataURI, + IntellectualProperty: u.IntellectualProperty, + Stage: u.Stage, + AutoDeleteSetting: u.AutoDeleteSetting, + IsAnonymous: u.IsAnonymous, + IsArchived: u.IsArchived, + Description: u.Description, + Properties: u.Properties, + Tags: u.Tags, } } @@ -8083,6 +12986,15 @@ type URIFolderJobOutput struct { // REQUIRED; [Required] Specifies the type of job. JobOutputType *JobOutputType + // Output Asset Name. + AssetName *string + + // Output Asset Version. + AssetVersion *string + + // Auto delete setting of output data asset. + AutoDeleteSetting *AutoDeleteSetting + // Description for the output. Description *string @@ -8188,6 +13100,16 @@ type UserAssignedIdentity struct { PrincipalID *string } +type UserCreatedAcrAccount struct { + // ARM ResourceId of a resource + ArmResourceID *ArmResourceID +} + +type UserCreatedStorageAccount struct { + // ARM ResourceId of a resource + ArmResourceID *ArmResourceID +} + // UserIdentity - User identity configuration. type UserIdentity struct { // REQUIRED; [Required] Specifies the type of identity framework. @@ -8208,23 +13130,21 @@ type UsernamePasswordAuthTypeWorkspaceConnectionProperties struct { // Category of the connection Category *ConnectionCategory Credentials *WorkspaceConnectionUsernamePassword - Target *string - - // Value details of the workspace connection. 
- Value *string + ExpiryTime *time.Time - // format for the workspace connection value - ValueFormat *ValueFormat + // Anything + Metadata any + Target *string } // GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type UsernamePasswordAuthTypeWorkspaceConnectionProperties. func (u *UsernamePasswordAuthTypeWorkspaceConnectionProperties) GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 { return &WorkspaceConnectionPropertiesV2{ - AuthType: u.AuthType, - Category: u.Category, - Target: u.Target, - Value: u.Value, - ValueFormat: u.ValueFormat, + AuthType: u.AuthType, + Category: u.Category, + ExpiryTime: u.ExpiryTime, + Metadata: u.Metadata, + Target: u.Target, } } @@ -8392,21 +13312,72 @@ type VirtualMachineSizesClientListOptions struct { // placeholder for future optional parameters } +type VolumeDefinition struct { + // Bind Options of the mount + Bind *BindOptions + + // Consistency of the volume + Consistency *string + + // Indicate whether to mount volume as readOnly. Default value for this is false. + ReadOnly *bool + + // Source of the mount. For bind mounts this is the host path. + Source *string + + // Target of the mount. For bind mounts this is the path in the container. + Target *string + + // tmpfs option of the mount + Tmpfs *TmpfsOptions + + // Type of Volume Definition. Possible Values: bind,volume,tmpfs,npipe + Type *VolumeDefinitionType + + // Volume Options of the mount + Volume *VolumeOptions +} + +type VolumeOptions struct { + // Indicate whether volume is nocopy + Nocopy *bool +} + +// WebhookClassification provides polymorphic access to related types. +// Call the interface's GetWebhook() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AzureDevOpsWebhook, *Webhook +type WebhookClassification interface { + // GetWebhook returns the Webhook content of the underlying type. + GetWebhook() *Webhook +} + +// Webhook base +type Webhook struct { + // REQUIRED; [Required] Specifies the type of service to send a callback + WebhookType *WebhookType + + // Send callback on a specified notification event + EventType *string +} + +// GetWebhook implements the WebhookClassification interface for type Webhook. +func (w *Webhook) GetWebhook() *Webhook { return w } + // Workspace - An object that represents a machine learning workspace. type Workspace struct { - // The identity of the resource. - Identity *ManagedServiceIdentity + // REQUIRED; Additional attributes of the entity. + Properties *WorkspaceProperties - // Specifies the location of the resource. + // Managed service identity (system assigned and/or user assigned identities) + Identity *ManagedServiceIdentity + Kind *string Location *string - // The properties of the machine learning workspace. - Properties *WorkspaceProperties - - // The sku of the workspace. + // Optional. This field is required to be implemented by the RP because AML is supporting more than one tier SKU *SKU - // Contains resource tags defined as key/value pairs. + // Dictionary of Tags map[string]*string // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} @@ -8422,6 +13393,16 @@ type Workspace struct { Type *string } +// WorkspaceConnectionAPIKey - Api key object for workspace connection credential. 
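
`VolumeDefinition` and its option types describe container mounts. Below is a small illustrative sketch of a read-only bind mount; the `VolumeDefinitionTypeBind` constant name is assumed from the allowed values listed in the comment, and the paths are placeholders.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

// readOnlyBindMount sketches a bind mount from the host into the container.
func readOnlyBindMount() armmachinelearning.VolumeDefinition {
	return armmachinelearning.VolumeDefinition{
		Type:     to.Ptr(armmachinelearning.VolumeDefinitionTypeBind), // constant name assumed
		Source:   to.Ptr("/mnt/data"),                                 // host path for bind mounts
		Target:   to.Ptr("/opt/ml/data"),                              // path inside the container
		ReadOnly: to.Ptr(true),
	}
}
```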
+type WorkspaceConnectionAPIKey struct { + Key *string +} + +type WorkspaceConnectionAccessKey struct { + AccessKeyID *string + SecretAccessKey *string +} + type WorkspaceConnectionManagedIdentity struct { ClientID *string ResourceID *string @@ -8434,8 +13415,10 @@ type WorkspaceConnectionPersonalAccessToken struct { // WorkspaceConnectionPropertiesV2Classification provides polymorphic access to related types. // Call the interface's GetWorkspaceConnectionPropertiesV2() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: +// - *APIKeyAuthWorkspaceConnectionProperties, *AccessKeyAuthTypeWorkspaceConnectionProperties, *CustomKeysWorkspaceConnectionProperties, // - *ManagedIdentityAuthTypeWorkspaceConnectionProperties, *NoneAuthTypeWorkspaceConnectionProperties, *PATAuthTypeWorkspaceConnectionProperties, -// - *SASAuthTypeWorkspaceConnectionProperties, *UsernamePasswordAuthTypeWorkspaceConnectionProperties, *WorkspaceConnectionPropertiesV2 +// - *SASAuthTypeWorkspaceConnectionProperties, *ServicePrincipalAuthTypeWorkspaceConnectionProperties, *UsernamePasswordAuthTypeWorkspaceConnectionProperties, +// - *WorkspaceConnectionPropertiesV2 type WorkspaceConnectionPropertiesV2Classification interface { // GetWorkspaceConnectionPropertiesV2 returns the WorkspaceConnectionPropertiesV2 content of the underlying type. GetWorkspaceConnectionPropertiesV2() *WorkspaceConnectionPropertiesV2 @@ -8446,14 +13429,12 @@ type WorkspaceConnectionPropertiesV2 struct { AuthType *ConnectionAuthType // Category of the connection - Category *ConnectionCategory - Target *string - - // Value details of the workspace connection. - Value *string + Category *ConnectionCategory + ExpiryTime *time.Time - // format for the workspace connection value - ValueFormat *ValueFormat + // Anything + Metadata any + Target *string } // GetWorkspaceConnectionPropertiesV2 implements the WorkspaceConnectionPropertiesV2Classification interface for type WorkspaceConnectionPropertiesV2. @@ -8479,16 +13460,26 @@ type WorkspaceConnectionPropertiesV2BasicResource struct { } type WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult struct { - Value []*WorkspaceConnectionPropertiesV2BasicResource - - // READ-ONLY NextLink *string + Value []*WorkspaceConnectionPropertiesV2BasicResource +} + +type WorkspaceConnectionServicePrincipal struct { + ClientID *string + ClientSecret *string + TenantID *string } type WorkspaceConnectionSharedAccessSignature struct { Sas *string } +// WorkspaceConnectionUpdateParameter - The properties that the machine learning workspace connection will be updated with. +type WorkspaceConnectionUpdateParameter struct { + // The properties that the machine learning workspace connection will be updated with. + Properties WorkspaceConnectionPropertiesV2Classification +} + type WorkspaceConnectionUsernamePassword struct { Password *string Username *string @@ -8496,7 +13487,8 @@ type WorkspaceConnectionUsernamePassword struct { // WorkspaceConnectionsClientCreateOptions contains the optional parameters for the WorkspaceConnectionsClient.Create method. type WorkspaceConnectionsClientCreateOptions struct { - // placeholder for future optional parameters + // The object for creating or updating a new workspace connection + Body *WorkspaceConnectionPropertiesV2BasicResource } // WorkspaceConnectionsClientDeleteOptions contains the optional parameters for the WorkspaceConnectionsClient.Delete method. 
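
The new update surface takes a `WorkspaceConnectionUpdateParameter` whose `Properties` can be any of the connection property variants. A hedged sketch using the new service principal variant follows; the target URL and credential values are placeholders. The resulting value would be passed through the `Body` field of `WorkspaceConnectionsClientUpdateOptions` introduced just below.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4" // module path assumed for the 4.x beta
)

// servicePrincipalConnectionUpdate sketches an update body for a workspace connection.
func servicePrincipalConnectionUpdate() armmachinelearning.WorkspaceConnectionUpdateParameter {
	return armmachinelearning.WorkspaceConnectionUpdateParameter{
		Properties: &armmachinelearning.ServicePrincipalAuthTypeWorkspaceConnectionProperties{
			AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypeServicePrincipal),
			Category: to.Ptr(armmachinelearning.ConnectionCategoryAzureOpenAI),
			Target:   to.Ptr("https://example.openai.azure.com/"), // placeholder endpoint
			Credentials: &armmachinelearning.WorkspaceConnectionServicePrincipal{
				ClientID:     to.Ptr("<client-id>"),
				ClientSecret: to.Ptr("<client-secret>"),
				TenantID:     to.Ptr("<tenant-id>"),
			},
		},
	}
}
```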
@@ -8518,14 +13510,32 @@ type WorkspaceConnectionsClientListOptions struct { Target *string } +// WorkspaceConnectionsClientListSecretsOptions contains the optional parameters for the WorkspaceConnectionsClient.ListSecrets +// method. +type WorkspaceConnectionsClientListSecretsOptions struct { + // placeholder for future optional parameters +} + +// WorkspaceConnectionsClientUpdateOptions contains the optional parameters for the WorkspaceConnectionsClient.Update method. +type WorkspaceConnectionsClientUpdateOptions struct { + // Parameters for workspace connection update. + Body *WorkspaceConnectionUpdateParameter +} + // WorkspaceFeaturesClientListOptions contains the optional parameters for the WorkspaceFeaturesClient.NewListPager method. type WorkspaceFeaturesClientListOptions struct { // placeholder for future optional parameters } +// WorkspaceHubConfig - WorkspaceHub's configuration object. +type WorkspaceHubConfig struct { + AdditionalWorkspaceStorageAccounts []*string + DefaultWorkspaceResourceGroup *string +} + // WorkspaceListResult - The result of a request to list machine learning workspaces. type WorkspaceListResult struct { - // The URI that can be used to request the next list of machine learning workspaces. + // The link to the next page constructed using the continuationToken. If null, there are no additional pages. NextLink *string // The list of machine learning workspaces. Since this list may be incomplete, the nextLink field should be used to request @@ -8533,13 +13543,24 @@ type WorkspaceListResult struct { Value []*Workspace } +// WorkspacePrivateEndpointResource - The Private Endpoint resource. +type WorkspacePrivateEndpointResource struct { + // READ-ONLY; e.g. /subscriptions/{networkSubscriptionId}/resourceGroups/{rgName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName} + ID *string + + // READ-ONLY; The subnetId that the private endpoint is connected to. + SubnetArmID *string +} + // WorkspaceProperties - The properties of a machine learning workspace. type WorkspaceProperties struct { // The flag to indicate whether to allow public access when behind VNet. AllowPublicAccessWhenBehindVnet *bool // ARM id of the application insights associated with this workspace. - ApplicationInsights *string + ApplicationInsights *string + AssociatedWorkspaces []*string + ContainerRegistries []*string // ARM id of the container registry associated with this workspace. ContainerRegistry *string @@ -8548,28 +13569,36 @@ type WorkspaceProperties struct { Description *string // Url for the discovery service to identify regional endpoints for machine learning experimentation services - DiscoveryURL *string + DiscoveryURL *string + EnableDataIsolation *bool + Encryption *EncryptionProperty + ExistingWorkspaces []*string - // The encryption settings of Azure ML workspace. - Encryption *EncryptionProperty + // Settings for feature store type workspace. + FeatureStoreSettings *FeatureStoreSettings // The friendly name for this workspace. This name in mutable FriendlyName *string // The flag to signal HBI data in the workspace and reduce diagnostic data collected by the service - HbiWorkspace *bool + HbiWorkspace *bool + HubResourceID *string // The compute name for image build ImageBuildCompute *string // ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created - KeyVault *string + KeyVault *string + KeyVaults []*string + + // Managed Network settings for a machine learning workspace. 
+ ManagedNetwork *ManagedNetworkSettings // The user assigned identity resource id that represents the workspace identity. PrimaryUserAssignedIdentity *string // Whether requests from Public Network are allowed. - PublicNetworkAccess *PublicNetworkAccess + PublicNetworkAccess *PublicNetworkAccessType // The service managed resource settings. ServiceManagedResourcesSettings *ServiceManagedResourcesSettings @@ -8577,12 +13606,22 @@ type WorkspaceProperties struct { // The list of shared private link resources in this workspace. SharedPrivateLinkResources []*SharedPrivateLinkResource + // Retention time in days after workspace get soft deleted. + SoftDeleteRetentionInDays *int32 + // ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created - StorageAccount *string + StorageAccount *string + StorageAccounts []*string + + // The auth mode used for accessing the system datastores of the workspace. + SystemDatastoresAuthMode *string // Enabling v1legacymode may prevent you from using features provided by the v2 API. V1LegacyMode *bool + // WorkspaceHub's configuration object. + WorkspaceHubConfig *WorkspaceHubConfig + // READ-ONLY; The URI associated with this workspace that machine learning flow must point at to set up tracking. MlFlowTrackingURI *string @@ -8613,7 +13652,7 @@ type WorkspaceProperties struct { WorkspaceID *string } -// WorkspacePropertiesUpdateParameters - The parameters for updating the properties of a machine learning workspace. +// WorkspacePropertiesUpdateParameters - The parameters for updating a machine learning workspace. type WorkspacePropertiesUpdateParameters struct { // ARM id of the application insights associated with this workspace. ApplicationInsights *string @@ -8622,33 +13661,47 @@ type WorkspacePropertiesUpdateParameters struct { ContainerRegistry *string // The description of this workspace. - Description *string + Description *string + EnableDataIsolation *bool + Encryption *EncryptionUpdateProperties + + // Settings for feature store type workspace. + FeatureStoreSettings *FeatureStoreSettings - // The friendly name for this workspace. + // The friendly name for this workspace. This name in mutable FriendlyName *string // The compute name for image build ImageBuildCompute *string + // Managed Network settings for a machine learning workspace. + ManagedNetwork *ManagedNetworkSettings + // The user assigned identity resource id that represents the workspace identity. PrimaryUserAssignedIdentity *string // Whether requests from Public Network are allowed. - PublicNetworkAccess *PublicNetworkAccess + PublicNetworkAccess *PublicNetworkAccessType // The service managed resource settings. ServiceManagedResourcesSettings *ServiceManagedResourcesSettings + + // Retention time in days after workspace get soft deleted. + SoftDeleteRetentionInDays *int32 + + // Enabling v1legacymode may prevent you from using features provided by the v2 API. + V1LegacyMode *bool } // WorkspaceUpdateParameters - The parameters for updating a machine learning workspace. type WorkspaceUpdateParameters struct { - // The identity of the resource. + // Managed service identity (system assigned and/or user assigned identities) Identity *ManagedServiceIdentity // The properties that the machine learning workspace will be updated with. Properties *WorkspacePropertiesUpdateParameters - // The sku of the workspace. + // Optional. 
This field is required to be implemented by the RP because AML is supporting more than one tier SKU *SKU // The resource tags for the machine learning workspace. @@ -8664,6 +13717,8 @@ type WorkspacesClientBeginCreateOrUpdateOptions struct { // WorkspacesClientBeginDeleteOptions contains the optional parameters for the WorkspacesClient.BeginDelete method. type WorkspacesClientBeginDeleteOptions struct { + // Flag to indicate delete is a purge request. + ForceToPurge *bool // Resumes the LRO from the provided token. ResumeToken string } @@ -8671,7 +13726,7 @@ type WorkspacesClientBeginDeleteOptions struct { // WorkspacesClientBeginDiagnoseOptions contains the optional parameters for the WorkspacesClient.BeginDiagnose method. type WorkspacesClientBeginDiagnoseOptions struct { // The parameter of diagnosing workspace health - Parameters *DiagnoseWorkspaceParameters + Body *DiagnoseWorkspaceParameters // Resumes the LRO from the provided token. ResumeToken string } @@ -8703,6 +13758,8 @@ type WorkspacesClientGetOptions struct { // WorkspacesClientListByResourceGroupOptions contains the optional parameters for the WorkspacesClient.NewListByResourceGroupPager // method. type WorkspacesClientListByResourceGroupOptions struct { + // Kind of workspace. + Kind *string // Continuation token for pagination. Skip *string } @@ -8710,6 +13767,8 @@ type WorkspacesClientListByResourceGroupOptions struct { // WorkspacesClientListBySubscriptionOptions contains the optional parameters for the WorkspacesClient.NewListBySubscriptionPager // method. type WorkspacesClientListBySubscriptionOptions struct { + // Kind of workspace. + Kind *string // Continuation token for pagination. Skip *string } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/models_serde.go b/sdk/resourcemanager/machinelearning/armmachinelearning/models_serde.go index 2d236caa6255..72c9944cce08 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/models_serde.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/models_serde.go @@ -169,6 +169,100 @@ func (a *AKSSchemaProperties) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type APIKeyAuthWorkspaceConnectionProperties. +func (a APIKeyAuthWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["authType"] = ConnectionAuthTypeAPIKey + populate(objectMap, "category", a.Category) + populate(objectMap, "credentials", a.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", a.ExpiryTime) + populateAny(objectMap, "metadata", a.Metadata) + populate(objectMap, "target", a.Target) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type APIKeyAuthWorkspaceConnectionProperties. 
+func (a *APIKeyAuthWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "authType": + err = unpopulate(val, "AuthType", &a.AuthType) + delete(rawMsg, key) + case "category": + err = unpopulate(val, "Category", &a.Category) + delete(rawMsg, key) + case "credentials": + err = unpopulate(val, "Credentials", &a.Credentials) + delete(rawMsg, key) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &a.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &a.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &a.Target) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AccessKeyAuthTypeWorkspaceConnectionProperties. +func (a AccessKeyAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["authType"] = ConnectionAuthTypeAccessKey + populate(objectMap, "category", a.Category) + populate(objectMap, "credentials", a.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", a.ExpiryTime) + populateAny(objectMap, "metadata", a.Metadata) + populate(objectMap, "target", a.Target) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AccessKeyAuthTypeWorkspaceConnectionProperties. +func (a *AccessKeyAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "authType": + err = unpopulate(val, "AuthType", &a.AuthType) + delete(rawMsg, key) + case "category": + err = unpopulate(val, "Category", &a.Category) + delete(rawMsg, key) + case "credentials": + err = unpopulate(val, "Credentials", &a.Credentials) + delete(rawMsg, key) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &a.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &a.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &a.Target) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AccountKeyDatastoreCredentials. func (a AccountKeyDatastoreCredentials) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -231,6 +325,37 @@ func (a *AccountKeyDatastoreSecrets) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AcrDetails. +func (a AcrDetails) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "systemCreatedAcrAccount", a.SystemCreatedAcrAccount) + populate(objectMap, "userCreatedAcrAccount", a.UserCreatedAcrAccount) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AcrDetails. 
+func (a *AcrDetails) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "systemCreatedAcrAccount": + err = unpopulate(val, "SystemCreatedAcrAccount", &a.SystemCreatedAcrAccount) + delete(rawMsg, key) + case "userCreatedAcrAccount": + err = unpopulate(val, "UserCreatedAcrAccount", &a.UserCreatedAcrAccount) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AksComputeSecrets. func (a AksComputeSecrets) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -344,6 +469,60 @@ func (a *AksNetworkingConfiguration) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AllFeatures. +func (a AllFeatures) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["filterType"] = MonitoringFeatureFilterTypeAllFeatures + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AllFeatures. +func (a *AllFeatures) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "filterType": + err = unpopulate(val, "FilterType", &a.FilterType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AllNodes. +func (a AllNodes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["nodesValueType"] = NodesValueTypeAll + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AllNodes. +func (a *AllNodes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nodesValueType": + err = unpopulate(val, "NodesValueType", &a.NodesValueType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AmlCompute. func (a AmlCompute) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -613,6 +792,7 @@ func (a AmlOperation) MarshalJSON() ([]byte, error) { populate(objectMap, "display", a.Display) populate(objectMap, "isDataAction", a.IsDataAction) populate(objectMap, "name", a.Name) + populate(objectMap, "origin", a.Origin) return json.Marshal(objectMap) } @@ -634,6 +814,9 @@ func (a *AmlOperation) UnmarshalJSON(data []byte) error { case "name": err = unpopulate(val, "Name", &a.Name) delete(rawMsg, key) + case "origin": + err = unpopulate(val, "Origin", &a.Origin) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -642,18 +825,15 @@ func (a *AmlOperation) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type AmlOperationDisplay. 
-func (a AmlOperationDisplay) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AmlOperationListResult. +func (a AmlOperationListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", a.Description) - populate(objectMap, "operation", a.Operation) - populate(objectMap, "provider", a.Provider) - populate(objectMap, "resource", a.Resource) + populate(objectMap, "value", a.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type AmlOperationDisplay. -func (a *AmlOperationDisplay) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AmlOperationListResult. +func (a *AmlOperationListResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -661,17 +841,8 @@ func (a *AmlOperationDisplay) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &a.Description) - delete(rawMsg, key) - case "operation": - err = unpopulate(val, "Operation", &a.Operation) - delete(rawMsg, key) - case "provider": - err = unpopulate(val, "Provider", &a.Provider) - delete(rawMsg, key) - case "resource": - err = unpopulate(val, "Resource", &a.Resource) + case "value": + err = unpopulate(val, "Value", &a.Value) delete(rawMsg, key) } if err != nil { @@ -681,15 +852,15 @@ func (a *AmlOperationDisplay) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type AmlOperationListResult. -func (a AmlOperationListResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AmlToken. +func (a AmlToken) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "value", a.Value) + objectMap["identityType"] = IdentityConfigurationTypeAMLToken return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type AmlOperationListResult. -func (a *AmlOperationListResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AmlToken. +func (a *AmlToken) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -697,8 +868,8 @@ func (a *AmlOperationListResult) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "value": - err = unpopulate(val, "Value", &a.Value) + case "identityType": + err = unpopulate(val, "IdentityType", &a.IdentityType) delete(rawMsg, key) } if err != nil { @@ -708,15 +879,15 @@ func (a *AmlOperationListResult) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type AmlToken. -func (a AmlToken) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AmlTokenComputeIdentity. 
+func (a AmlTokenComputeIdentity) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["identityType"] = IdentityConfigurationTypeAMLToken + objectMap["computeIdentityType"] = MonitorComputeIdentityTypeAmlToken return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type AmlToken. -func (a *AmlToken) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AmlTokenComputeIdentity. +func (a *AmlTokenComputeIdentity) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -724,8 +895,8 @@ func (a *AmlToken) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "identityType": - err = unpopulate(val, "IdentityType", &a.IdentityType) + case "computeIdentityType": + err = unpopulate(val, "ComputeIdentityType", &a.ComputeIdentityType) delete(rawMsg, key) } if err != nil { @@ -770,9 +941,37 @@ func (a *AmlUserFeature) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ArmResourceID. +func (a ArmResourceID) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "resourceId", a.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ArmResourceID. +func (a *ArmResourceID) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "resourceId": + err = unpopulate(val, "ResourceID", &a.ResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AssetBase. func (a AssetBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", a.AutoDeleteSetting) populate(objectMap, "description", a.Description) populate(objectMap, "isAnonymous", a.IsAnonymous) populate(objectMap, "isArchived", a.IsArchived) @@ -790,6 +989,9 @@ func (a *AssetBase) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &a.AutoDeleteSetting) + delete(rawMsg, key) case "description": err = unpopulate(val, "Description", &a.Description) delete(rawMsg, key) @@ -894,6 +1096,9 @@ func (a *AssetJobInput) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type AssetJobOutput. 
func (a AssetJobOutput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "assetName", a.AssetName) + populate(objectMap, "assetVersion", a.AssetVersion) + populate(objectMap, "autoDeleteSetting", a.AutoDeleteSetting) populate(objectMap, "mode", a.Mode) populate(objectMap, "uri", a.URI) return json.Marshal(objectMap) @@ -908,6 +1113,15 @@ func (a *AssetJobOutput) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "assetName": + err = unpopulate(val, "AssetName", &a.AssetName) + delete(rawMsg, key) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &a.AssetVersion) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &a.AutoDeleteSetting) + delete(rawMsg, key) case "mode": err = unpopulate(val, "Mode", &a.Mode) delete(rawMsg, key) @@ -980,6 +1194,37 @@ func (a *AssignedUser) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AutoDeleteSetting. +func (a AutoDeleteSetting) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "condition", a.Condition) + populate(objectMap, "value", a.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutoDeleteSetting. +func (a *AutoDeleteSetting) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "condition": + err = unpopulate(val, "Condition", &a.Condition) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &a.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AutoForecastHorizon. 
func (a AutoForecastHorizon) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1020,9 +1265,12 @@ func (a AutoMLJob) MarshalJSON() ([]byte, error) { populate(objectMap, "identity", a.Identity) populate(objectMap, "isArchived", a.IsArchived) objectMap["jobType"] = JobTypeAutoML + populate(objectMap, "notificationSetting", a.NotificationSetting) populate(objectMap, "outputs", a.Outputs) populate(objectMap, "properties", a.Properties) + populate(objectMap, "queueSettings", a.QueueSettings) populate(objectMap, "resources", a.Resources) + populate(objectMap, "secretsConfiguration", a.SecretsConfiguration) populate(objectMap, "services", a.Services) populate(objectMap, "status", a.Status) populate(objectMap, "tags", a.Tags) @@ -1069,15 +1317,24 @@ func (a *AutoMLJob) UnmarshalJSON(data []byte) error { case "jobType": err = unpopulate(val, "JobType", &a.JobType) delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &a.NotificationSetting) + delete(rawMsg, key) case "outputs": a.Outputs, err = unmarshalJobOutputClassificationMap(val) delete(rawMsg, key) case "properties": err = unpopulate(val, "Properties", &a.Properties) delete(rawMsg, key) + case "queueSettings": + err = unpopulate(val, "QueueSettings", &a.QueueSettings) + delete(rawMsg, key) case "resources": err = unpopulate(val, "Resources", &a.Resources) delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &a.SecretsConfiguration) + delete(rawMsg, key) case "services": err = unpopulate(val, "Services", &a.Services) delete(rawMsg, key) @@ -1311,6 +1568,60 @@ func (a *AutoTargetRollingWindowSize) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AutologgerSettings. +func (a AutologgerSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "mlflowAutologger", a.MlflowAutologger) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutologgerSettings. +func (a *AutologgerSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mlflowAutologger": + err = unpopulate(val, "MlflowAutologger", &a.MlflowAutologger) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzMonMonitoringAlertNotificationSettings. +func (a AzMonMonitoringAlertNotificationSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["alertNotificationType"] = MonitoringAlertNotificationTypeAzureMonitor + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzMonMonitoringAlertNotificationSettings. 
+func (a *AzMonMonitoringAlertNotificationSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alertNotificationType": + err = unpopulate(val, "AlertNotificationType", &a.AlertNotificationType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AzureBlobDatastore. func (a AzureBlobDatastore) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1320,10 +1631,13 @@ func (a AzureBlobDatastore) MarshalJSON() ([]byte, error) { objectMap["datastoreType"] = DatastoreTypeAzureBlob populate(objectMap, "description", a.Description) populate(objectMap, "endpoint", a.Endpoint) + populate(objectMap, "intellectualProperty", a.IntellectualProperty) populate(objectMap, "isDefault", a.IsDefault) populate(objectMap, "properties", a.Properties) populate(objectMap, "protocol", a.Protocol) + populate(objectMap, "resourceGroup", a.ResourceGroup) populate(objectMap, "serviceDataAccessAuthIdentity", a.ServiceDataAccessAuthIdentity) + populate(objectMap, "subscriptionId", a.SubscriptionID) populate(objectMap, "tags", a.Tags) return json.Marshal(objectMap) } @@ -1355,6 +1669,9 @@ func (a *AzureBlobDatastore) UnmarshalJSON(data []byte) error { case "endpoint": err = unpopulate(val, "Endpoint", &a.Endpoint) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &a.IntellectualProperty) + delete(rawMsg, key) case "isDefault": err = unpopulate(val, "IsDefault", &a.IsDefault) delete(rawMsg, key) @@ -1364,9 +1681,15 @@ func (a *AzureBlobDatastore) UnmarshalJSON(data []byte) error { case "protocol": err = unpopulate(val, "Protocol", &a.Protocol) delete(rawMsg, key) + case "resourceGroup": + err = unpopulate(val, "ResourceGroup", &a.ResourceGroup) + delete(rawMsg, key) case "serviceDataAccessAuthIdentity": err = unpopulate(val, "ServiceDataAccessAuthIdentity", &a.ServiceDataAccessAuthIdentity) delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &a.SubscriptionID) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &a.Tags) delete(rawMsg, key) @@ -1384,10 +1707,13 @@ func (a AzureDataLakeGen1Datastore) MarshalJSON() ([]byte, error) { populate(objectMap, "credentials", a.Credentials) objectMap["datastoreType"] = DatastoreTypeAzureDataLakeGen1 populate(objectMap, "description", a.Description) + populate(objectMap, "intellectualProperty", a.IntellectualProperty) populate(objectMap, "isDefault", a.IsDefault) populate(objectMap, "properties", a.Properties) + populate(objectMap, "resourceGroup", a.ResourceGroup) populate(objectMap, "serviceDataAccessAuthIdentity", a.ServiceDataAccessAuthIdentity) populate(objectMap, "storeName", a.StoreName) + populate(objectMap, "subscriptionId", a.SubscriptionID) populate(objectMap, "tags", a.Tags) return json.Marshal(objectMap) } @@ -1410,18 +1736,27 @@ func (a *AzureDataLakeGen1Datastore) UnmarshalJSON(data []byte) error { case "description": err = unpopulate(val, "Description", &a.Description) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &a.IntellectualProperty) + delete(rawMsg, key) case "isDefault": err = unpopulate(val, "IsDefault", &a.IsDefault) delete(rawMsg, key) case 
"properties": err = unpopulate(val, "Properties", &a.Properties) delete(rawMsg, key) + case "resourceGroup": + err = unpopulate(val, "ResourceGroup", &a.ResourceGroup) + delete(rawMsg, key) case "serviceDataAccessAuthIdentity": err = unpopulate(val, "ServiceDataAccessAuthIdentity", &a.ServiceDataAccessAuthIdentity) delete(rawMsg, key) case "storeName": err = unpopulate(val, "StoreName", &a.StoreName) delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &a.SubscriptionID) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &a.Tags) delete(rawMsg, key) @@ -1442,10 +1777,13 @@ func (a AzureDataLakeGen2Datastore) MarshalJSON() ([]byte, error) { populate(objectMap, "description", a.Description) populate(objectMap, "endpoint", a.Endpoint) populate(objectMap, "filesystem", a.Filesystem) + populate(objectMap, "intellectualProperty", a.IntellectualProperty) populate(objectMap, "isDefault", a.IsDefault) populate(objectMap, "properties", a.Properties) populate(objectMap, "protocol", a.Protocol) + populate(objectMap, "resourceGroup", a.ResourceGroup) populate(objectMap, "serviceDataAccessAuthIdentity", a.ServiceDataAccessAuthIdentity) + populate(objectMap, "subscriptionId", a.SubscriptionID) populate(objectMap, "tags", a.Tags) return json.Marshal(objectMap) } @@ -1477,6 +1815,9 @@ func (a *AzureDataLakeGen2Datastore) UnmarshalJSON(data []byte) error { case "filesystem": err = unpopulate(val, "Filesystem", &a.Filesystem) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &a.IntellectualProperty) + delete(rawMsg, key) case "isDefault": err = unpopulate(val, "IsDefault", &a.IsDefault) delete(rawMsg, key) @@ -1486,9 +1827,15 @@ func (a *AzureDataLakeGen2Datastore) UnmarshalJSON(data []byte) error { case "protocol": err = unpopulate(val, "Protocol", &a.Protocol) delete(rawMsg, key) + case "resourceGroup": + err = unpopulate(val, "ResourceGroup", &a.ResourceGroup) + delete(rawMsg, key) case "serviceDataAccessAuthIdentity": err = unpopulate(val, "ServiceDataAccessAuthIdentity", &a.ServiceDataAccessAuthIdentity) delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &a.SubscriptionID) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &a.Tags) delete(rawMsg, key) @@ -1500,6 +1847,68 @@ func (a *AzureDataLakeGen2Datastore) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AzureDatastore. +func (a AzureDatastore) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "resourceGroup", a.ResourceGroup) + populate(objectMap, "subscriptionId", a.SubscriptionID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureDatastore. +func (a *AzureDatastore) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "resourceGroup": + err = unpopulate(val, "ResourceGroup", &a.ResourceGroup) + delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &a.SubscriptionID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureDevOpsWebhook. 
+func (a AzureDevOpsWebhook) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "eventType", a.EventType) + objectMap["webhookType"] = WebhookTypeAzureDevOps + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureDevOpsWebhook. +func (a *AzureDevOpsWebhook) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "eventType": + err = unpopulate(val, "EventType", &a.EventType) + delete(rawMsg, key) + case "webhookType": + err = unpopulate(val, "WebhookType", &a.WebhookType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AzureFileDatastore. func (a AzureFileDatastore) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1509,10 +1918,13 @@ func (a AzureFileDatastore) MarshalJSON() ([]byte, error) { populate(objectMap, "description", a.Description) populate(objectMap, "endpoint", a.Endpoint) populate(objectMap, "fileShareName", a.FileShareName) + populate(objectMap, "intellectualProperty", a.IntellectualProperty) populate(objectMap, "isDefault", a.IsDefault) populate(objectMap, "properties", a.Properties) populate(objectMap, "protocol", a.Protocol) + populate(objectMap, "resourceGroup", a.ResourceGroup) populate(objectMap, "serviceDataAccessAuthIdentity", a.ServiceDataAccessAuthIdentity) + populate(objectMap, "subscriptionId", a.SubscriptionID) populate(objectMap, "tags", a.Tags) return json.Marshal(objectMap) } @@ -1544,6 +1956,9 @@ func (a *AzureFileDatastore) UnmarshalJSON(data []byte) error { case "fileShareName": err = unpopulate(val, "FileShareName", &a.FileShareName) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &a.IntellectualProperty) + delete(rawMsg, key) case "isDefault": err = unpopulate(val, "IsDefault", &a.IsDefault) delete(rawMsg, key) @@ -1553,9 +1968,15 @@ func (a *AzureFileDatastore) UnmarshalJSON(data []byte) error { case "protocol": err = unpopulate(val, "Protocol", &a.Protocol) delete(rawMsg, key) + case "resourceGroup": + err = unpopulate(val, "ResourceGroup", &a.ResourceGroup) + delete(rawMsg, key) case "serviceDataAccessAuthIdentity": err = unpopulate(val, "ServiceDataAccessAuthIdentity", &a.ServiceDataAccessAuthIdentity) delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &a.SubscriptionID) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &a.Tags) delete(rawMsg, key) @@ -1567,6 +1988,68 @@ func (a *AzureFileDatastore) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AzureMLBatchInferencingServer. +func (a AzureMLBatchInferencingServer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "codeConfiguration", a.CodeConfiguration) + objectMap["serverType"] = InferencingServerTypeAzureMLBatch + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureMLBatchInferencingServer. 
+func (a *AzureMLBatchInferencingServer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "codeConfiguration": + err = unpopulate(val, "CodeConfiguration", &a.CodeConfiguration) + delete(rawMsg, key) + case "serverType": + err = unpopulate(val, "ServerType", &a.ServerType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureMLOnlineInferencingServer. +func (a AzureMLOnlineInferencingServer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "codeConfiguration", a.CodeConfiguration) + objectMap["serverType"] = InferencingServerTypeAzureMLOnline + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureMLOnlineInferencingServer. +func (a *AzureMLOnlineInferencingServer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "codeConfiguration": + err = unpopulate(val, "CodeConfiguration", &a.CodeConfiguration) + delete(rawMsg, key) + case "serverType": + err = unpopulate(val, "ServerType", &a.ServerType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type BanditPolicy. func (b BanditPolicy) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1610,6 +2093,64 @@ func (b *BanditPolicy) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type BaseEnvironmentID. +func (b BaseEnvironmentID) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["baseEnvironmentSourceType"] = BaseEnvironmentSourceTypeEnvironmentAsset + populate(objectMap, "resourceId", b.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BaseEnvironmentID. +func (b *BaseEnvironmentID) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "baseEnvironmentSourceType": + err = unpopulate(val, "BaseEnvironmentSourceType", &b.BaseEnvironmentSourceType) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &b.ResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type BaseEnvironmentSource. +func (b BaseEnvironmentSource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["baseEnvironmentSourceType"] = b.BaseEnvironmentSourceType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BaseEnvironmentSource. 
+func (b *BaseEnvironmentSource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "baseEnvironmentSourceType": + err = unpopulate(val, "BaseEnvironmentSourceType", &b.BaseEnvironmentSourceType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type BatchDeployment. func (b BatchDeployment) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1673,11 +2214,39 @@ func (b *BatchDeployment) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type BatchDeploymentConfiguration. +func (b BatchDeploymentConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["deploymentConfigurationType"] = b.DeploymentConfigurationType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchDeploymentConfiguration. +func (b *BatchDeploymentConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "deploymentConfigurationType": + err = unpopulate(val, "DeploymentConfigurationType", &b.DeploymentConfigurationType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type BatchDeploymentProperties. func (b BatchDeploymentProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "codeConfiguration", b.CodeConfiguration) populate(objectMap, "compute", b.Compute) + populate(objectMap, "deploymentConfiguration", b.DeploymentConfiguration) populate(objectMap, "description", b.Description) populate(objectMap, "environmentId", b.EnvironmentID) populate(objectMap, "environmentVariables", b.EnvironmentVariables) @@ -1710,6 +2279,9 @@ func (b *BatchDeploymentProperties) UnmarshalJSON(data []byte) error { case "compute": err = unpopulate(val, "Compute", &b.Compute) delete(rawMsg, key) + case "deploymentConfiguration": + b.DeploymentConfiguration, err = unmarshalBatchDeploymentConfigurationClassification(val) + delete(rawMsg, key) case "description": err = unpopulate(val, "Description", &b.Description) delete(rawMsg, key) @@ -1967,6 +2539,49 @@ func (b *BatchEndpointTrackedResourceArmPaginatedResult) UnmarshalJSON(data []by return nil } +// MarshalJSON implements the json.Marshaller interface for type BatchPipelineComponentDeploymentConfiguration. +func (b BatchPipelineComponentDeploymentConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "componentId", b.ComponentID) + objectMap["deploymentConfigurationType"] = BatchDeploymentConfigurationTypePipelineComponent + populate(objectMap, "description", b.Description) + populate(objectMap, "settings", b.Settings) + populate(objectMap, "tags", b.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchPipelineComponentDeploymentConfiguration. 
+func (b *BatchPipelineComponentDeploymentConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "componentId": + err = unpopulate(val, "ComponentID", &b.ComponentID) + delete(rawMsg, key) + case "deploymentConfigurationType": + err = unpopulate(val, "DeploymentConfigurationType", &b.DeploymentConfigurationType) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &b.Description) + delete(rawMsg, key) + case "settings": + err = unpopulate(val, "Settings", &b.Settings) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &b.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type BatchRetrySettings. func (b BatchRetrySettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2025,16 +2640,17 @@ func (b *BayesianSamplingAlgorithm) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type BuildContext. -func (b BuildContext) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type BindOptions. +func (b BindOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "contextUri", b.ContextURI) - populate(objectMap, "dockerfilePath", b.DockerfilePath) + populate(objectMap, "createHostPath", b.CreateHostPath) + populate(objectMap, "propagation", b.Propagation) + populate(objectMap, "selinux", b.Selinux) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type BuildContext. -func (b *BuildContext) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type BindOptions. +func (b *BindOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", b, err) @@ -2042,11 +2658,14 @@ func (b *BuildContext) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "contextUri": - err = unpopulate(val, "ContextURI", &b.ContextURI) + case "createHostPath": + err = unpopulate(val, "CreateHostPath", &b.CreateHostPath) delete(rawMsg, key) - case "dockerfilePath": - err = unpopulate(val, "DockerfilePath", &b.DockerfilePath) + case "propagation": + err = unpopulate(val, "Propagation", &b.Propagation) + delete(rawMsg, key) + case "selinux": + err = unpopulate(val, "Selinux", &b.Selinux) delete(rawMsg, key) } if err != nil { @@ -2056,67 +2675,289 @@ func (b *BuildContext) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CertificateDatastoreCredentials. -func (c CertificateDatastoreCredentials) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type BlobReferenceForConsumptionDto. 
+func (b BlobReferenceForConsumptionDto) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "authorityUrl", c.AuthorityURL) - populate(objectMap, "clientId", c.ClientID) - objectMap["credentialsType"] = CredentialsTypeCertificate - populate(objectMap, "resourceUrl", c.ResourceURL) - populate(objectMap, "secrets", c.Secrets) - populate(objectMap, "tenantId", c.TenantID) - populate(objectMap, "thumbprint", c.Thumbprint) + populate(objectMap, "blobUri", b.BlobURI) + populate(objectMap, "credential", b.Credential) + populate(objectMap, "storageAccountArmId", b.StorageAccountArmID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateDatastoreCredentials. -func (c *CertificateDatastoreCredentials) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type BlobReferenceForConsumptionDto. +func (b *BlobReferenceForConsumptionDto) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", b, err) } for key, val := range rawMsg { var err error switch key { - case "authorityUrl": - err = unpopulate(val, "AuthorityURL", &c.AuthorityURL) - delete(rawMsg, key) - case "clientId": - err = unpopulate(val, "ClientID", &c.ClientID) - delete(rawMsg, key) - case "credentialsType": - err = unpopulate(val, "CredentialsType", &c.CredentialsType) + case "blobUri": + err = unpopulate(val, "BlobURI", &b.BlobURI) delete(rawMsg, key) - case "resourceUrl": - err = unpopulate(val, "ResourceURL", &c.ResourceURL) - delete(rawMsg, key) - case "secrets": - err = unpopulate(val, "Secrets", &c.Secrets) - delete(rawMsg, key) - case "tenantId": - err = unpopulate(val, "TenantID", &c.TenantID) + case "credential": + b.Credential, err = unmarshalPendingUploadCredentialDtoClassification(val) delete(rawMsg, key) - case "thumbprint": - err = unpopulate(val, "Thumbprint", &c.Thumbprint) + case "storageAccountArmId": + err = unpopulate(val, "StorageAccountArmID", &b.StorageAccountArmID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", b, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type CertificateDatastoreSecrets. -func (c CertificateDatastoreSecrets) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type BuildContext. +func (b BuildContext) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "certificate", c.Certificate) - objectMap["secretsType"] = SecretsTypeCertificate + populate(objectMap, "contextUri", b.ContextURI) + populate(objectMap, "dockerfilePath", b.DockerfilePath) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateDatastoreSecrets. -func (c *CertificateDatastoreSecrets) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type BuildContext. 
+func (b *BuildContext) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contextUri": + err = unpopulate(val, "ContextURI", &b.ContextURI) + delete(rawMsg, key) + case "dockerfilePath": + err = unpopulate(val, "DockerfilePath", &b.DockerfilePath) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CSVExportSummary. +func (c CSVExportSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "containerName", c.ContainerName) + populateTimeRFC3339(objectMap, "endDateTime", c.EndDateTime) + populate(objectMap, "exportedRowCount", c.ExportedRowCount) + objectMap["format"] = ExportFormatTypeCSV + populate(objectMap, "labelingJobId", c.LabelingJobID) + populate(objectMap, "snapshotPath", c.SnapshotPath) + populateTimeRFC3339(objectMap, "startDateTime", c.StartDateTime) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CSVExportSummary. +func (c *CSVExportSummary) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "containerName": + err = unpopulate(val, "ContainerName", &c.ContainerName) + delete(rawMsg, key) + case "endDateTime": + err = unpopulateTimeRFC3339(val, "EndDateTime", &c.EndDateTime) + delete(rawMsg, key) + case "exportedRowCount": + err = unpopulate(val, "ExportedRowCount", &c.ExportedRowCount) + delete(rawMsg, key) + case "format": + err = unpopulate(val, "Format", &c.Format) + delete(rawMsg, key) + case "labelingJobId": + err = unpopulate(val, "LabelingJobID", &c.LabelingJobID) + delete(rawMsg, key) + case "snapshotPath": + err = unpopulate(val, "SnapshotPath", &c.SnapshotPath) + delete(rawMsg, key) + case "startDateTime": + err = unpopulateTimeRFC3339(val, "StartDateTime", &c.StartDateTime) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CategoricalDataDriftMetricThreshold. +func (c CategoricalDataDriftMetricThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["dataType"] = MonitoringFeatureDataTypeCategorical + populate(objectMap, "metric", c.Metric) + populate(objectMap, "threshold", c.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CategoricalDataDriftMetricThreshold. 
+func (c *CategoricalDataDriftMetricThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataType": + err = unpopulate(val, "DataType", &c.DataType) + delete(rawMsg, key) + case "metric": + err = unpopulate(val, "Metric", &c.Metric) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &c.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CategoricalDataQualityMetricThreshold. +func (c CategoricalDataQualityMetricThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["dataType"] = MonitoringFeatureDataTypeCategorical + populate(objectMap, "metric", c.Metric) + populate(objectMap, "threshold", c.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CategoricalDataQualityMetricThreshold. +func (c *CategoricalDataQualityMetricThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataType": + err = unpopulate(val, "DataType", &c.DataType) + delete(rawMsg, key) + case "metric": + err = unpopulate(val, "Metric", &c.Metric) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &c.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CategoricalPredictionDriftMetricThreshold. +func (c CategoricalPredictionDriftMetricThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["dataType"] = MonitoringFeatureDataTypeCategorical + populate(objectMap, "metric", c.Metric) + populate(objectMap, "threshold", c.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CategoricalPredictionDriftMetricThreshold. +func (c *CategoricalPredictionDriftMetricThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataType": + err = unpopulate(val, "DataType", &c.DataType) + delete(rawMsg, key) + case "metric": + err = unpopulate(val, "Metric", &c.Metric) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &c.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateDatastoreCredentials. 
+func (c CertificateDatastoreCredentials) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "authorityUrl", c.AuthorityURL) + populate(objectMap, "clientId", c.ClientID) + objectMap["credentialsType"] = CredentialsTypeCertificate + populate(objectMap, "resourceUrl", c.ResourceURL) + populate(objectMap, "secrets", c.Secrets) + populate(objectMap, "tenantId", c.TenantID) + populate(objectMap, "thumbprint", c.Thumbprint) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateDatastoreCredentials. +func (c *CertificateDatastoreCredentials) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "authorityUrl": + err = unpopulate(val, "AuthorityURL", &c.AuthorityURL) + delete(rawMsg, key) + case "clientId": + err = unpopulate(val, "ClientID", &c.ClientID) + delete(rawMsg, key) + case "credentialsType": + err = unpopulate(val, "CredentialsType", &c.CredentialsType) + delete(rawMsg, key) + case "resourceUrl": + err = unpopulate(val, "ResourceURL", &c.ResourceURL) + delete(rawMsg, key) + case "secrets": + err = unpopulate(val, "Secrets", &c.Secrets) + delete(rawMsg, key) + case "tenantId": + err = unpopulate(val, "TenantID", &c.TenantID) + delete(rawMsg, key) + case "thumbprint": + err = unpopulate(val, "Thumbprint", &c.Thumbprint) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateDatastoreSecrets. +func (c CertificateDatastoreSecrets) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "certificate", c.Certificate) + objectMap["secretsType"] = SecretsTypeCertificate + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateDatastoreSecrets. 
+func (c *CertificateDatastoreSecrets) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -2143,11 +2984,14 @@ func (c Classification) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "cvSplitColumnNames", c.CvSplitColumnNames) populate(objectMap, "featurizationSettings", c.FeaturizationSettings) + populate(objectMap, "fixedParameters", c.FixedParameters) populate(objectMap, "limitSettings", c.LimitSettings) populate(objectMap, "logVerbosity", c.LogVerbosity) populate(objectMap, "nCrossValidations", c.NCrossValidations) populate(objectMap, "positiveLabel", c.PositiveLabel) populate(objectMap, "primaryMetric", c.PrimaryMetric) + populate(objectMap, "searchSpace", c.SearchSpace) + populate(objectMap, "sweepSettings", c.SweepSettings) populate(objectMap, "targetColumnName", c.TargetColumnName) objectMap["taskType"] = TaskTypeClassification populate(objectMap, "testData", c.TestData) @@ -2175,6 +3019,9 @@ func (c *Classification) UnmarshalJSON(data []byte) error { case "featurizationSettings": err = unpopulate(val, "FeaturizationSettings", &c.FeaturizationSettings) delete(rawMsg, key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &c.FixedParameters) + delete(rawMsg, key) case "limitSettings": err = unpopulate(val, "LimitSettings", &c.LimitSettings) delete(rawMsg, key) @@ -2190,6 +3037,12 @@ func (c *Classification) UnmarshalJSON(data []byte) error { case "primaryMetric": err = unpopulate(val, "PrimaryMetric", &c.PrimaryMetric) delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &c.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &c.SweepSettings) + delete(rawMsg, key) case "targetColumnName": err = unpopulate(val, "TargetColumnName", &c.TargetColumnName) delete(rawMsg, key) @@ -2225,6 +3078,41 @@ func (c *Classification) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClassificationModelPerformanceMetricThreshold. +func (c ClassificationModelPerformanceMetricThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "metric", c.Metric) + objectMap["modelType"] = MonitoringModelTypeClassification + populate(objectMap, "threshold", c.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClassificationModelPerformanceMetricThreshold. +func (c *ClassificationModelPerformanceMetricThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "metric": + err = unpopulate(val, "Metric", &c.Metric) + delete(rawMsg, key) + case "modelType": + err = unpopulate(val, "ModelType", &c.ModelType) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &c.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClassificationTrainingSettings. 
func (c ClassificationTrainingSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2237,6 +3125,7 @@ func (c ClassificationTrainingSettings) MarshalJSON() ([]byte, error) { populate(objectMap, "enableVoteEnsemble", c.EnableVoteEnsemble) populate(objectMap, "ensembleModelDownloadTimeout", c.EnsembleModelDownloadTimeout) populate(objectMap, "stackEnsembleSettings", c.StackEnsembleSettings) + populate(objectMap, "trainingMode", c.TrainingMode) return json.Marshal(objectMap) } @@ -2276,6 +3165,9 @@ func (c *ClassificationTrainingSettings) UnmarshalJSON(data []byte) error { case "stackEnsembleSettings": err = unpopulate(val, "StackEnsembleSettings", &c.StackEnsembleSettings) delete(rawMsg, key) + case "trainingMode": + err = unpopulate(val, "TrainingMode", &c.TrainingMode) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -2338,6 +3230,57 @@ func (c *ClusterUpdateProperties) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type CocoExportSummary. +func (c CocoExportSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "containerName", c.ContainerName) + populateTimeRFC3339(objectMap, "endDateTime", c.EndDateTime) + populate(objectMap, "exportedRowCount", c.ExportedRowCount) + objectMap["format"] = ExportFormatTypeCoco + populate(objectMap, "labelingJobId", c.LabelingJobID) + populate(objectMap, "snapshotPath", c.SnapshotPath) + populateTimeRFC3339(objectMap, "startDateTime", c.StartDateTime) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CocoExportSummary. +func (c *CocoExportSummary) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "containerName": + err = unpopulate(val, "ContainerName", &c.ContainerName) + delete(rawMsg, key) + case "endDateTime": + err = unpopulateTimeRFC3339(val, "EndDateTime", &c.EndDateTime) + delete(rawMsg, key) + case "exportedRowCount": + err = unpopulate(val, "ExportedRowCount", &c.ExportedRowCount) + delete(rawMsg, key) + case "format": + err = unpopulate(val, "Format", &c.Format) + delete(rawMsg, key) + case "labelingJobId": + err = unpopulate(val, "LabelingJobID", &c.LabelingJobID) + delete(rawMsg, key) + case "snapshotPath": + err = unpopulate(val, "SnapshotPath", &c.SnapshotPath) + delete(rawMsg, key) + case "startDateTime": + err = unpopulateTimeRFC3339(val, "StartDateTime", &c.StartDateTime) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type CodeConfiguration. 
func (c CodeConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2420,6 +3363,7 @@ func (c CodeContainerProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "latestVersion", c.LatestVersion) populate(objectMap, "nextVersion", c.NextVersion) populate(objectMap, "properties", c.Properties) + populate(objectMap, "provisioningState", c.ProvisioningState) populate(objectMap, "tags", c.Tags) return json.Marshal(objectMap) } @@ -2448,6 +3392,9 @@ func (c *CodeContainerProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &c.Properties) delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &c.ProvisioningState) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &c.Tags) delete(rawMsg, key) @@ -2536,11 +3483,13 @@ func (c *CodeVersion) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type CodeVersionProperties. func (c CodeVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", c.AutoDeleteSetting) populate(objectMap, "codeUri", c.CodeURI) populate(objectMap, "description", c.Description) populate(objectMap, "isAnonymous", c.IsAnonymous) populate(objectMap, "isArchived", c.IsArchived) populate(objectMap, "properties", c.Properties) + populate(objectMap, "provisioningState", c.ProvisioningState) populate(objectMap, "tags", c.Tags) return json.Marshal(objectMap) } @@ -2554,6 +3503,9 @@ func (c *CodeVersionProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &c.AutoDeleteSetting) + delete(rawMsg, key) case "codeUri": err = unpopulate(val, "CodeURI", &c.CodeURI) delete(rawMsg, key) @@ -2569,6 +3521,9 @@ func (c *CodeVersionProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &c.Properties) delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &c.ProvisioningState) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &c.Tags) delete(rawMsg, key) @@ -2611,16 +3566,18 @@ func (c *CodeVersionResourceArmPaginatedResult) UnmarshalJSON(data []byte) error return nil } -// MarshalJSON implements the json.Marshaller interface for type ColumnTransformer. -func (c ColumnTransformer) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Collection. +func (c Collection) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "fields", c.Fields) - populateAny(objectMap, "parameters", c.Parameters) + populate(objectMap, "clientId", c.ClientID) + populate(objectMap, "dataCollectionMode", c.DataCollectionMode) + populate(objectMap, "dataId", c.DataID) + populate(objectMap, "samplingRate", c.SamplingRate) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ColumnTransformer. -func (c *ColumnTransformer) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Collection. 
+func (c *Collection) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -2628,11 +3585,48 @@ func (c *ColumnTransformer) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "fields": - err = unpopulate(val, "Fields", &c.Fields) + case "clientId": + err = unpopulate(val, "ClientID", &c.ClientID) delete(rawMsg, key) - case "parameters": - err = unpopulate(val, "Parameters", &c.Parameters) + case "dataCollectionMode": + err = unpopulate(val, "DataCollectionMode", &c.DataCollectionMode) + delete(rawMsg, key) + case "dataId": + err = unpopulate(val, "DataID", &c.DataID) + delete(rawMsg, key) + case "samplingRate": + err = unpopulate(val, "SamplingRate", &c.SamplingRate) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ColumnTransformer. +func (c ColumnTransformer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "fields", c.Fields) + populateAny(objectMap, "parameters", c.Parameters) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ColumnTransformer. +func (c *ColumnTransformer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "fields": + err = unpopulate(val, "Fields", &c.Fields) + delete(rawMsg, key) + case "parameters": + err = unpopulate(val, "Parameters", &c.Parameters) delete(rawMsg, key) } if err != nil { @@ -2645,6 +3639,7 @@ func (c *ColumnTransformer) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type CommandJob. 
func (c CommandJob) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autologgerSettings", c.AutologgerSettings) populate(objectMap, "codeId", c.CodeID) populate(objectMap, "command", c.Command) populate(objectMap, "componentId", c.ComponentID) @@ -2660,10 +3655,13 @@ func (c CommandJob) MarshalJSON() ([]byte, error) { populate(objectMap, "isArchived", c.IsArchived) objectMap["jobType"] = JobTypeCommand populate(objectMap, "limits", c.Limits) + populate(objectMap, "notificationSetting", c.NotificationSetting) populate(objectMap, "outputs", c.Outputs) populateAny(objectMap, "parameters", c.Parameters) populate(objectMap, "properties", c.Properties) + populate(objectMap, "queueSettings", c.QueueSettings) populate(objectMap, "resources", c.Resources) + populate(objectMap, "secretsConfiguration", c.SecretsConfiguration) populate(objectMap, "services", c.Services) populate(objectMap, "status", c.Status) populate(objectMap, "tags", c.Tags) @@ -2679,6 +3677,9 @@ func (c *CommandJob) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autologgerSettings": + err = unpopulate(val, "AutologgerSettings", &c.AutologgerSettings) + delete(rawMsg, key) case "codeId": err = unpopulate(val, "CodeID", &c.CodeID) delete(rawMsg, key) @@ -2724,6 +3725,9 @@ func (c *CommandJob) UnmarshalJSON(data []byte) error { case "limits": err = unpopulate(val, "Limits", &c.Limits) delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &c.NotificationSetting) + delete(rawMsg, key) case "outputs": c.Outputs, err = unmarshalJobOutputClassificationMap(val) delete(rawMsg, key) @@ -2733,9 +3737,15 @@ func (c *CommandJob) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &c.Properties) delete(rawMsg, key) + case "queueSettings": + err = unpopulate(val, "QueueSettings", &c.QueueSettings) + delete(rawMsg, key) case "resources": err = unpopulate(val, "Resources", &c.Resources) delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &c.SecretsConfiguration) + delete(rawMsg, key) case "services": err = unpopulate(val, "Services", &c.Services) delete(rawMsg, key) @@ -2835,6 +3845,7 @@ func (c ComponentContainerProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "latestVersion", c.LatestVersion) populate(objectMap, "nextVersion", c.NextVersion) populate(objectMap, "properties", c.Properties) + populate(objectMap, "provisioningState", c.ProvisioningState) populate(objectMap, "tags", c.Tags) return json.Marshal(objectMap) } @@ -2863,6 +3874,9 @@ func (c *ComponentContainerProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &c.Properties) delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &c.ProvisioningState) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &c.Tags) delete(rawMsg, key) @@ -2951,11 +3965,14 @@ func (c *ComponentVersion) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ComponentVersionProperties. 
func (c ComponentVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", c.AutoDeleteSetting) populateAny(objectMap, "componentSpec", c.ComponentSpec) populate(objectMap, "description", c.Description) populate(objectMap, "isAnonymous", c.IsAnonymous) populate(objectMap, "isArchived", c.IsArchived) populate(objectMap, "properties", c.Properties) + populate(objectMap, "provisioningState", c.ProvisioningState) + populate(objectMap, "stage", c.Stage) populate(objectMap, "tags", c.Tags) return json.Marshal(objectMap) } @@ -2969,6 +3986,9 @@ func (c *ComponentVersionProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &c.AutoDeleteSetting) + delete(rawMsg, key) case "componentSpec": err = unpopulate(val, "ComponentSpec", &c.ComponentSpec) delete(rawMsg, key) @@ -2984,6 +4004,12 @@ func (c *ComponentVersionProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &c.Properties) delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &c.ProvisioningState) + delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &c.Stage) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &c.Tags) delete(rawMsg, key) @@ -3187,6 +4213,33 @@ func (c *ComputeInstanceApplication) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ComputeInstanceAutologgerSettings. +func (c ComputeInstanceAutologgerSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "mlflowAutologger", c.MlflowAutologger) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ComputeInstanceAutologgerSettings. +func (c *ComputeInstanceAutologgerSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mlflowAutologger": + err = unpopulate(val, "MlflowAutologger", &c.MlflowAutologger) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ComputeInstanceConnectivityEndpoints. 
func (c ComputeInstanceConnectivityEndpoints) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -3473,15 +4526,19 @@ func (c ComputeInstanceProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "applicationSharingPolicy", c.ApplicationSharingPolicy) populate(objectMap, "applications", c.Applications) + populate(objectMap, "autologgerSettings", c.AutologgerSettings) populate(objectMap, "computeInstanceAuthorizationType", c.ComputeInstanceAuthorizationType) populate(objectMap, "connectivityEndpoints", c.ConnectivityEndpoints) populate(objectMap, "containers", c.Containers) populate(objectMap, "createdBy", c.CreatedBy) + populate(objectMap, "customServices", c.CustomServices) populate(objectMap, "dataDisks", c.DataDisks) populate(objectMap, "dataMounts", c.DataMounts) populate(objectMap, "enableNodePublicIp", c.EnableNodePublicIP) populate(objectMap, "errors", c.Errors) + populate(objectMap, "idleTimeBeforeShutdown", c.IdleTimeBeforeShutdown) populate(objectMap, "lastOperation", c.LastOperation) + populate(objectMap, "osImageMetadata", c.OSImageMetadata) populate(objectMap, "personalComputeInstanceSettings", c.PersonalComputeInstanceSettings) populate(objectMap, "sshSettings", c.SSHSettings) populate(objectMap, "schedules", c.Schedules) @@ -3508,6 +4565,9 @@ func (c *ComputeInstanceProperties) UnmarshalJSON(data []byte) error { case "applications": err = unpopulate(val, "Applications", &c.Applications) delete(rawMsg, key) + case "autologgerSettings": + err = unpopulate(val, "AutologgerSettings", &c.AutologgerSettings) + delete(rawMsg, key) case "computeInstanceAuthorizationType": err = unpopulate(val, "ComputeInstanceAuthorizationType", &c.ComputeInstanceAuthorizationType) delete(rawMsg, key) @@ -3520,6 +4580,9 @@ func (c *ComputeInstanceProperties) UnmarshalJSON(data []byte) error { case "createdBy": err = unpopulate(val, "CreatedBy", &c.CreatedBy) delete(rawMsg, key) + case "customServices": + err = unpopulate(val, "CustomServices", &c.CustomServices) + delete(rawMsg, key) case "dataDisks": err = unpopulate(val, "DataDisks", &c.DataDisks) delete(rawMsg, key) @@ -3532,9 +4595,15 @@ func (c *ComputeInstanceProperties) UnmarshalJSON(data []byte) error { case "errors": err = unpopulate(val, "Errors", &c.Errors) delete(rawMsg, key) + case "idleTimeBeforeShutdown": + err = unpopulate(val, "IdleTimeBeforeShutdown", &c.IdleTimeBeforeShutdown) + delete(rawMsg, key) case "lastOperation": err = unpopulate(val, "LastOperation", &c.LastOperation) delete(rawMsg, key) + case "osImageMetadata": + err = unpopulate(val, "OSImageMetadata", &c.OSImageMetadata) + delete(rawMsg, key) case "personalComputeInstanceSettings": err = unpopulate(val, "PersonalComputeInstanceSettings", &c.PersonalComputeInstanceSettings) delete(rawMsg, key) @@ -3746,6 +4815,33 @@ func (c *ComputeResourceSchema) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ComputeRuntimeDto. +func (c ComputeRuntimeDto) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "sparkRuntimeVersion", c.SparkRuntimeVersion) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ComputeRuntimeDto. 
+func (c *ComputeRuntimeDto) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "sparkRuntimeVersion": + err = unpopulate(val, "SparkRuntimeVersion", &c.SparkRuntimeVersion) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ComputeSchedules. func (c ComputeSchedules) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -3948,6 +5044,72 @@ func (c *CosmosDbSettings) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type CreateMonitorAction. +func (c CreateMonitorAction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["actionType"] = ScheduleActionTypeCreateMonitor + populate(objectMap, "monitorDefinition", c.MonitorDefinition) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CreateMonitorAction. +func (c *CreateMonitorAction) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "actionType": + err = unpopulate(val, "ActionType", &c.ActionType) + delete(rawMsg, key) + case "monitorDefinition": + err = unpopulate(val, "MonitorDefinition", &c.MonitorDefinition) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Cron. +func (c Cron) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "expression", c.Expression) + populate(objectMap, "startTime", c.StartTime) + populate(objectMap, "timeZone", c.TimeZone) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Cron. +func (c *Cron) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "expression": + err = unpopulate(val, "Expression", &c.Expression) + delete(rawMsg, key) + case "startTime": + err = unpopulate(val, "StartTime", &c.StartTime) + delete(rawMsg, key) + case "timeZone": + err = unpopulate(val, "TimeZone", &c.TimeZone) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type CronTrigger. func (c CronTrigger) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -4022,18 +5184,16 @@ func (c *CustomForecastHorizon) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CustomModelJobInput. -func (c CustomModelJobInput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type CustomInferencingServer. 
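The new Cron shape carries only the expression, start time and time zone keys seen in its marshaller, and it is the type a compute start/stop schedule now embeds. A hedged sketch of filling it in, reusing the imports from the round-trip sketch earlier; the ComputeStartStopSchedule field and enum names other than Cron itself are assumptions:

```go
// Sketch only: Action/TriggerType names and their enum values are assumptions.
sched := armmachinelearning.ComputeStartStopSchedule{
	Action:      to.Ptr(armmachinelearning.ComputePowerActionStop),
	TriggerType: to.Ptr(armmachinelearning.TriggerTypeCron),
	Cron: &armmachinelearning.Cron{
		Expression: to.Ptr("0 22 * * 1-5"), // 22:00, Monday through Friday
		StartTime:  to.Ptr("2023-06-15T00:00:00"),
		TimeZone:   to.Ptr("UTC"),
	},
}
_ = sched
```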
+func (c CustomInferencingServer) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", c.Description) - objectMap["jobInputType"] = JobInputTypeCustomModel - populate(objectMap, "mode", c.Mode) - populate(objectMap, "uri", c.URI) + populate(objectMap, "inferenceConfiguration", c.InferenceConfiguration) + objectMap["serverType"] = InferencingServerTypeCustom return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CustomModelJobInput. -func (c *CustomModelJobInput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomInferencingServer. +func (c *CustomInferencingServer) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -4041,17 +5201,11 @@ func (c *CustomModelJobInput) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &c.Description) - delete(rawMsg, key) - case "jobInputType": - err = unpopulate(val, "JobInputType", &c.JobInputType) - delete(rawMsg, key) - case "mode": - err = unpopulate(val, "Mode", &c.Mode) + case "inferenceConfiguration": + err = unpopulate(val, "InferenceConfiguration", &c.InferenceConfiguration) delete(rawMsg, key) - case "uri": - err = unpopulate(val, "URI", &c.URI) + case "serverType": + err = unpopulate(val, "ServerType", &c.ServerType) delete(rawMsg, key) } if err != nil { @@ -4061,18 +5215,15 @@ func (c *CustomModelJobInput) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CustomModelJobOutput. -func (c CustomModelJobOutput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type CustomKeys. +func (c CustomKeys) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", c.Description) - objectMap["jobOutputType"] = JobOutputTypeCustomModel - populate(objectMap, "mode", c.Mode) - populate(objectMap, "uri", c.URI) + populate(objectMap, "keys", c.Keys) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CustomModelJobOutput. -func (c *CustomModelJobOutput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomKeys. +func (c *CustomKeys) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -4080,17 +5231,8 @@ func (c *CustomModelJobOutput) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &c.Description) - delete(rawMsg, key) - case "jobOutputType": - err = unpopulate(val, "JobOutputType", &c.JobOutputType) - delete(rawMsg, key) - case "mode": - err = unpopulate(val, "Mode", &c.Mode) - delete(rawMsg, key) - case "uri": - err = unpopulate(val, "URI", &c.URI) + case "keys": + err = unpopulate(val, "Keys", &c.Keys) delete(rawMsg, key) } if err != nil { @@ -4100,16 +5242,20 @@ func (c *CustomModelJobOutput) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CustomNCrossValidations. 
-func (c CustomNCrossValidations) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type CustomKeysWorkspaceConnectionProperties. +func (c CustomKeysWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = NCrossValidationsModeCustom - populate(objectMap, "value", c.Value) + objectMap["authType"] = ConnectionAuthTypeCustomKeys + populate(objectMap, "category", c.Category) + populate(objectMap, "credentials", c.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", c.ExpiryTime) + populateAny(objectMap, "metadata", c.Metadata) + populate(objectMap, "target", c.Target) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CustomNCrossValidations. -func (c *CustomNCrossValidations) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomKeysWorkspaceConnectionProperties. +func (c *CustomKeysWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -4117,11 +5263,23 @@ func (c *CustomNCrossValidations) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "mode": - err = unpopulate(val, "Mode", &c.Mode) + case "authType": + err = unpopulate(val, "AuthType", &c.AuthType) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &c.Value) + case "category": + err = unpopulate(val, "Category", &c.Category) + delete(rawMsg, key) + case "credentials": + err = unpopulate(val, "Credentials", &c.Credentials) + delete(rawMsg, key) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &c.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &c.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &c.Target) delete(rawMsg, key) } if err != nil { @@ -4131,16 +5289,16 @@ func (c *CustomNCrossValidations) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CustomSeasonality. -func (c CustomSeasonality) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type CustomMetricThreshold. +func (c CustomMetricThreshold) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = SeasonalityModeCustom - populate(objectMap, "value", c.Value) + populate(objectMap, "metric", c.Metric) + populate(objectMap, "threshold", c.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CustomSeasonality. -func (c *CustomSeasonality) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomMetricThreshold. 
+func (c *CustomMetricThreshold) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -4148,11 +5306,11 @@ func (c *CustomSeasonality) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "mode": - err = unpopulate(val, "Mode", &c.Mode) + case "metric": + err = unpopulate(val, "Metric", &c.Metric) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &c.Value) + case "threshold": + err = unpopulate(val, "Threshold", &c.Threshold) delete(rawMsg, key) } if err != nil { @@ -4162,16 +5320,18 @@ func (c *CustomSeasonality) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CustomTargetLags. -func (c CustomTargetLags) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type CustomModelJobInput. +func (c CustomModelJobInput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = TargetLagsModeCustom - populate(objectMap, "values", c.Values) + populate(objectMap, "description", c.Description) + objectMap["jobInputType"] = JobInputTypeCustomModel + populate(objectMap, "mode", c.Mode) + populate(objectMap, "uri", c.URI) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CustomTargetLags. -func (c *CustomTargetLags) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomModelJobInput. +func (c *CustomModelJobInput) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -4179,11 +5339,17 @@ func (c *CustomTargetLags) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "description": + err = unpopulate(val, "Description", &c.Description) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &c.JobInputType) + delete(rawMsg, key) case "mode": err = unpopulate(val, "Mode", &c.Mode) delete(rawMsg, key) - case "values": - err = unpopulate(val, "Values", &c.Values) + case "uri": + err = unpopulate(val, "URI", &c.URI) delete(rawMsg, key) } if err != nil { @@ -4193,16 +5359,21 @@ func (c *CustomTargetLags) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CustomTargetRollingWindowSize. -func (c CustomTargetRollingWindowSize) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type CustomModelJobOutput. +func (c CustomModelJobOutput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = TargetRollingWindowSizeModeCustom - populate(objectMap, "value", c.Value) + populate(objectMap, "assetName", c.AssetName) + populate(objectMap, "assetVersion", c.AssetVersion) + populate(objectMap, "autoDeleteSetting", c.AutoDeleteSetting) + populate(objectMap, "description", c.Description) + objectMap["jobOutputType"] = JobOutputTypeCustomModel + populate(objectMap, "mode", c.Mode) + populate(objectMap, "uri", c.URI) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CustomTargetRollingWindowSize. 
-func (c *CustomTargetRollingWindowSize) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomModelJobOutput. +func (c *CustomModelJobOutput) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -4210,16 +5381,307 @@ func (c *CustomTargetRollingWindowSize) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "mode": - err = unpopulate(val, "Mode", &c.Mode) + case "assetName": + err = unpopulate(val, "AssetName", &c.AssetName) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &c.Value) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &c.AssetVersion) delete(rawMsg, key) - } - if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) - } + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &c.AutoDeleteSetting) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &c.Description) + delete(rawMsg, key) + case "jobOutputType": + err = unpopulate(val, "JobOutputType", &c.JobOutputType) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &c.Mode) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &c.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CustomMonitoringSignal. +func (c CustomMonitoringSignal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "componentId", c.ComponentID) + populate(objectMap, "inputAssets", c.InputAssets) + populate(objectMap, "inputs", c.Inputs) + populate(objectMap, "metricThresholds", c.MetricThresholds) + populate(objectMap, "mode", c.Mode) + populate(objectMap, "properties", c.Properties) + objectMap["signalType"] = MonitoringSignalTypeCustom + populate(objectMap, "workspaceConnection", c.WorkspaceConnection) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomMonitoringSignal. 
+func (c *CustomMonitoringSignal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "componentId": + err = unpopulate(val, "ComponentID", &c.ComponentID) + delete(rawMsg, key) + case "inputAssets": + c.InputAssets, err = unmarshalMonitoringInputDataBaseClassificationMap(val) + delete(rawMsg, key) + case "inputs": + c.Inputs, err = unmarshalJobInputClassificationMap(val) + delete(rawMsg, key) + case "metricThresholds": + err = unpopulate(val, "MetricThresholds", &c.MetricThresholds) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &c.Mode) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &c.Properties) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &c.SignalType) + delete(rawMsg, key) + case "workspaceConnection": + err = unpopulate(val, "WorkspaceConnection", &c.WorkspaceConnection) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CustomNCrossValidations. +func (c CustomNCrossValidations) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mode"] = NCrossValidationsModeCustom + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomNCrossValidations. +func (c *CustomNCrossValidations) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &c.Mode) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CustomSeasonality. +func (c CustomSeasonality) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mode"] = SeasonalityModeCustom + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomSeasonality. +func (c *CustomSeasonality) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &c.Mode) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CustomService. 
+func (c CustomService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "docker", c.Docker) + populate(objectMap, "endpoints", c.Endpoints) + populate(objectMap, "environmentVariables", c.EnvironmentVariables) + populate(objectMap, "image", c.Image) + populate(objectMap, "name", c.Name) + populate(objectMap, "volumes", c.Volumes) + if c.AdditionalProperties != nil { + for key, val := range c.AdditionalProperties { + objectMap[key] = val + } + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomService. +func (c *CustomService) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "docker": + err = unpopulate(val, "Docker", &c.Docker) + delete(rawMsg, key) + case "endpoints": + err = unpopulate(val, "Endpoints", &c.Endpoints) + delete(rawMsg, key) + case "environmentVariables": + err = unpopulate(val, "EnvironmentVariables", &c.EnvironmentVariables) + delete(rawMsg, key) + case "image": + err = unpopulate(val, "Image", &c.Image) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "volumes": + err = unpopulate(val, "Volumes", &c.Volumes) + delete(rawMsg, key) + default: + if c.AdditionalProperties == nil { + c.AdditionalProperties = map[string]any{} + } + if val != nil { + var aux any + err = json.Unmarshal(val, &aux) + c.AdditionalProperties[key] = aux + } + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CustomTargetLags. +func (c CustomTargetLags) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mode"] = TargetLagsModeCustom + populate(objectMap, "values", c.Values) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomTargetLags. +func (c *CustomTargetLags) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &c.Mode) + delete(rawMsg, key) + case "values": + err = unpopulate(val, "Values", &c.Values) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CustomTargetRollingWindowSize. +func (c CustomTargetRollingWindowSize) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mode"] = TargetRollingWindowSizeModeCustom + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CustomTargetRollingWindowSize. 
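CustomService is the one type in this stretch with an open-ended schema: keys the switch does not recognise fall through to the `default` branch and are kept in AdditionalProperties, and MarshalJSON writes them back out next to the named fields. A small sketch, assuming Name is `*string` and reusing the earlier imports inside a function body:

```go
payload := []byte(`{"name":"jupyter-lab","vendorFlag":true}`)

var svc armmachinelearning.CustomService
if err := json.Unmarshal(payload, &svc); err != nil {
	panic(err)
}

fmt.Println(*svc.Name)                              // jupyter-lab
fmt.Println(svc.AdditionalProperties["vendorFlag"]) // true (captured by the default branch)

// Marshalling writes the property bag back out alongside the named fields.
out, _ := json.Marshal(svc)
fmt.Println(string(out)) // {"name":"jupyter-lab","vendorFlag":true}
```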
+func (c *CustomTargetRollingWindowSize) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &c.Mode) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DataCollector. +func (d DataCollector) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "collections", d.Collections) + populate(objectMap, "requestLogging", d.RequestLogging) + populate(objectMap, "rollingRate", d.RollingRate) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataCollector. +func (d *DataCollector) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "collections": + err = unpopulate(val, "Collections", &d.Collections) + delete(rawMsg, key) + case "requestLogging": + err = unpopulate(val, "RequestLogging", &d.RequestLogging) + delete(rawMsg, key) + case "rollingRate": + err = unpopulate(val, "RollingRate", &d.RollingRate) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } } return nil } @@ -4349,24 +5811,16 @@ func (d *DataContainerResourceArmPaginatedResult) UnmarshalJSON(data []byte) err return nil } -// MarshalJSON implements the json.Marshaller interface for type DataFactory. -func (d DataFactory) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type DataDriftMetricThresholdBase. +func (d DataDriftMetricThresholdBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "computeLocation", d.ComputeLocation) - objectMap["computeType"] = ComputeTypeDataFactory - populateTimeRFC3339(objectMap, "createdOn", d.CreatedOn) - populate(objectMap, "description", d.Description) - populate(objectMap, "disableLocalAuth", d.DisableLocalAuth) - populate(objectMap, "isAttachedCompute", d.IsAttachedCompute) - populateTimeRFC3339(objectMap, "modifiedOn", d.ModifiedOn) - populate(objectMap, "provisioningErrors", d.ProvisioningErrors) - populate(objectMap, "provisioningState", d.ProvisioningState) - populate(objectMap, "resourceId", d.ResourceID) + objectMap["dataType"] = d.DataType + populate(objectMap, "threshold", d.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type DataFactory. -func (d *DataFactory) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type DataDriftMetricThresholdBase. 
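DataCollector and Collection together describe per-output model data collection: the `collections` key maps an output name to its sampling settings. A sketch of building one, reusing the earlier imports; the enum names and the pointer map element type are assumptions, while the JSON keys match the marshallers above:

```go
// Sketch only: DataCollectionModeEnabled and RollingRateTypeHour are assumed enum values.
dc := armmachinelearning.DataCollector{
	Collections: map[string]*armmachinelearning.Collection{
		"model_inputs": {
			DataCollectionMode: to.Ptr(armmachinelearning.DataCollectionModeEnabled),
			DataID:             to.Ptr("azureml:model-inputs:1"),
			SamplingRate:       to.Ptr(1.0),
		},
	},
	RollingRate: to.Ptr(armmachinelearning.RollingRateTypeHour),
}

raw, _ := json.Marshal(dc)
fmt.Println(string(raw))
```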
+func (d *DataDriftMetricThresholdBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", d, err) @@ -4374,9 +5828,107 @@ func (d *DataFactory) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "computeLocation": - err = unpopulate(val, "ComputeLocation", &d.ComputeLocation) - delete(rawMsg, key) + case "dataType": + err = unpopulate(val, "DataType", &d.DataType) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &d.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DataDriftMonitoringSignal. +func (d DataDriftMonitoringSignal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dataSegment", d.DataSegment) + populate(objectMap, "featureDataTypeOverride", d.FeatureDataTypeOverride) + populate(objectMap, "features", d.Features) + populate(objectMap, "metricThresholds", d.MetricThresholds) + populate(objectMap, "mode", d.Mode) + populate(objectMap, "productionData", d.ProductionData) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "referenceData", d.ReferenceData) + objectMap["signalType"] = MonitoringSignalTypeDataDrift + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataDriftMonitoringSignal. +func (d *DataDriftMonitoringSignal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataSegment": + err = unpopulate(val, "DataSegment", &d.DataSegment) + delete(rawMsg, key) + case "featureDataTypeOverride": + err = unpopulate(val, "FeatureDataTypeOverride", &d.FeatureDataTypeOverride) + delete(rawMsg, key) + case "features": + d.Features, err = unmarshalMonitoringFeatureFilterBaseClassification(val) + delete(rawMsg, key) + case "metricThresholds": + d.MetricThresholds, err = unmarshalDataDriftMetricThresholdBaseClassificationArray(val) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &d.Mode) + delete(rawMsg, key) + case "productionData": + d.ProductionData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &d.Properties) + delete(rawMsg, key) + case "referenceData": + d.ReferenceData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &d.SignalType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DataFactory. 
+func (d DataFactory) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "computeLocation", d.ComputeLocation) + objectMap["computeType"] = ComputeTypeDataFactory + populateTimeRFC3339(objectMap, "createdOn", d.CreatedOn) + populate(objectMap, "description", d.Description) + populate(objectMap, "disableLocalAuth", d.DisableLocalAuth) + populate(objectMap, "isAttachedCompute", d.IsAttachedCompute) + populateTimeRFC3339(objectMap, "modifiedOn", d.ModifiedOn) + populate(objectMap, "provisioningErrors", d.ProvisioningErrors) + populate(objectMap, "provisioningState", d.ProvisioningState) + populate(objectMap, "resourceId", d.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataFactory. +func (d *DataFactory) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeLocation": + err = unpopulate(val, "ComputeLocation", &d.ComputeLocation) + delete(rawMsg, key) case "computeType": err = unpopulate(val, "ComputeType", &d.ComputeType) delete(rawMsg, key) @@ -4412,6 +5964,108 @@ func (d *DataFactory) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type DataImport. +func (d DataImport) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "assetName", d.AssetName) + populate(objectMap, "autoDeleteSetting", d.AutoDeleteSetting) + objectMap["dataType"] = DataTypeURIFolder + populate(objectMap, "dataUri", d.DataURI) + populate(objectMap, "description", d.Description) + populate(objectMap, "intellectualProperty", d.IntellectualProperty) + populate(objectMap, "isAnonymous", d.IsAnonymous) + populate(objectMap, "isArchived", d.IsArchived) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "source", d.Source) + populate(objectMap, "stage", d.Stage) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataImport. 
+func (d *DataImport) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "assetName": + err = unpopulate(val, "AssetName", &d.AssetName) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &d.AutoDeleteSetting) + delete(rawMsg, key) + case "dataType": + err = unpopulate(val, "DataType", &d.DataType) + delete(rawMsg, key) + case "dataUri": + err = unpopulate(val, "DataURI", &d.DataURI) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &d.Description) + delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &d.IntellectualProperty) + delete(rawMsg, key) + case "isAnonymous": + err = unpopulate(val, "IsAnonymous", &d.IsAnonymous) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &d.IsArchived) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &d.Properties) + delete(rawMsg, key) + case "source": + d.Source, err = unmarshalDataImportSourceClassification(val) + delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &d.Stage) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DataImportSource. +func (d DataImportSource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "connection", d.Connection) + objectMap["sourceType"] = d.SourceType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataImportSource. +func (d *DataImportSource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "connection": + err = unpopulate(val, "Connection", &d.Connection) + delete(rawMsg, key) + case "sourceType": + err = unpopulate(val, "SourceType", &d.SourceType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type DataLakeAnalytics. func (d DataLakeAnalytics) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -4568,6 +6222,92 @@ func (d *DataPathAssetReference) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type DataQualityMetricThresholdBase. +func (d DataQualityMetricThresholdBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["dataType"] = d.DataType + populate(objectMap, "threshold", d.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataQualityMetricThresholdBase. 
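DataImport hands its `source` field to unmarshalDataImportSourceClassification, which inspects `sourceType` and returns the matching concrete type, so callers type-assert after decoding. A hedged sketch, reusing the earlier imports; the `"database"` and `"uri_folder"` wire values are assumptions read off the DataImportSourceTypeDatabase and DataTypeURIFolder constants used above:

```go
payload := []byte(`{
  "dataType": "uri_folder",
  "dataUri": "azureml://datastores/imported/paths/sales/",
  "source": {
    "sourceType": "database",
    "connection": "azureml:my_sql_connection",
    "query": "SELECT * FROM sales"
  }
}`)

var imp armmachinelearning.DataImport
if err := json.Unmarshal(payload, &imp); err != nil {
	panic(err)
}

// The classification helper picked the concrete subtype behind the interface.
if db, ok := imp.Source.(*armmachinelearning.DatabaseSource); ok {
	fmt.Println(*db.Query) // SELECT * FROM sales
}
```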
+func (d *DataQualityMetricThresholdBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataType": + err = unpopulate(val, "DataType", &d.DataType) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &d.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DataQualityMonitoringSignal. +func (d DataQualityMonitoringSignal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "featureDataTypeOverride", d.FeatureDataTypeOverride) + populate(objectMap, "features", d.Features) + populate(objectMap, "metricThresholds", d.MetricThresholds) + populate(objectMap, "mode", d.Mode) + populate(objectMap, "productionData", d.ProductionData) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "referenceData", d.ReferenceData) + objectMap["signalType"] = MonitoringSignalTypeDataQuality + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataQualityMonitoringSignal. +func (d *DataQualityMonitoringSignal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "featureDataTypeOverride": + err = unpopulate(val, "FeatureDataTypeOverride", &d.FeatureDataTypeOverride) + delete(rawMsg, key) + case "features": + d.Features, err = unmarshalMonitoringFeatureFilterBaseClassification(val) + delete(rawMsg, key) + case "metricThresholds": + d.MetricThresholds, err = unmarshalDataQualityMetricThresholdBaseClassificationArray(val) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &d.Mode) + delete(rawMsg, key) + case "productionData": + d.ProductionData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &d.Properties) + delete(rawMsg, key) + case "referenceData": + d.ReferenceData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &d.SignalType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type DataVersionBase. func (d DataVersionBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -4614,12 +6354,15 @@ func (d *DataVersionBase) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type DataVersionBaseProperties. 
func (d DataVersionBaseProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", d.AutoDeleteSetting) objectMap["dataType"] = d.DataType populate(objectMap, "dataUri", d.DataURI) populate(objectMap, "description", d.Description) + populate(objectMap, "intellectualProperty", d.IntellectualProperty) populate(objectMap, "isAnonymous", d.IsAnonymous) populate(objectMap, "isArchived", d.IsArchived) populate(objectMap, "properties", d.Properties) + populate(objectMap, "stage", d.Stage) populate(objectMap, "tags", d.Tags) return json.Marshal(objectMap) } @@ -4633,6 +6376,9 @@ func (d *DataVersionBaseProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &d.AutoDeleteSetting) + delete(rawMsg, key) case "dataType": err = unpopulate(val, "DataType", &d.DataType) delete(rawMsg, key) @@ -4642,6 +6388,9 @@ func (d *DataVersionBaseProperties) UnmarshalJSON(data []byte) error { case "description": err = unpopulate(val, "Description", &d.Description) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &d.IntellectualProperty) + delete(rawMsg, key) case "isAnonymous": err = unpopulate(val, "IsAnonymous", &d.IsAnonymous) delete(rawMsg, key) @@ -4651,6 +6400,9 @@ func (d *DataVersionBaseProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &d.Properties) delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &d.Stage) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &d.Tags) delete(rawMsg, key) @@ -4693,6 +6445,53 @@ func (d *DataVersionBaseResourceArmPaginatedResult) UnmarshalJSON(data []byte) e return nil } +// MarshalJSON implements the json.Marshaller interface for type DatabaseSource. +func (d DatabaseSource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "connection", d.Connection) + populate(objectMap, "query", d.Query) + objectMap["sourceType"] = DataImportSourceTypeDatabase + populate(objectMap, "storedProcedure", d.StoredProcedure) + populate(objectMap, "storedProcedureParams", d.StoredProcedureParams) + populate(objectMap, "tableName", d.TableName) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DatabaseSource. +func (d *DatabaseSource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "connection": + err = unpopulate(val, "Connection", &d.Connection) + delete(rawMsg, key) + case "query": + err = unpopulate(val, "Query", &d.Query) + delete(rawMsg, key) + case "sourceType": + err = unpopulate(val, "SourceType", &d.SourceType) + delete(rawMsg, key) + case "storedProcedure": + err = unpopulate(val, "StoredProcedure", &d.StoredProcedure) + delete(rawMsg, key) + case "storedProcedureParams": + err = unpopulate(val, "StoredProcedureParams", &d.StoredProcedureParams) + delete(rawMsg, key) + case "tableName": + err = unpopulate(val, "TableName", &d.TableName) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type Databricks. 
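DatabaseSource above hard-codes its discriminator (objectMap["sourceType"] = DataImportSourceTypeDatabase), so callers only populate the payload fields and the wire value is filled in on marshal. A hedged usage sketch — the import paths and pointer-typed fields are assumptions about the released module; adjust them to your setup:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	// Only the payload fields are set; the generated MarshalJSON above injects
	// the "sourceType" discriminator for us.
	src := armmachinelearning.DatabaseSource{
		Connection: to.Ptr("azureml:my-sql-connection"),
		Query:      to.Ptr("SELECT * FROM observations"),
	}
	b, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // includes "sourceType" alongside connection and query
}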
func (d Databricks) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -4876,6 +6675,53 @@ func (d *DatabricksSchema) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type DatasetExportSummary. +func (d DatasetExportSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeRFC3339(objectMap, "endDateTime", d.EndDateTime) + populate(objectMap, "exportedRowCount", d.ExportedRowCount) + objectMap["format"] = ExportFormatTypeDataset + populate(objectMap, "labeledAssetName", d.LabeledAssetName) + populate(objectMap, "labelingJobId", d.LabelingJobID) + populateTimeRFC3339(objectMap, "startDateTime", d.StartDateTime) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DatasetExportSummary. +func (d *DatasetExportSummary) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "endDateTime": + err = unpopulateTimeRFC3339(val, "EndDateTime", &d.EndDateTime) + delete(rawMsg, key) + case "exportedRowCount": + err = unpopulate(val, "ExportedRowCount", &d.ExportedRowCount) + delete(rawMsg, key) + case "format": + err = unpopulate(val, "Format", &d.Format) + delete(rawMsg, key) + case "labeledAssetName": + err = unpopulate(val, "LabeledAssetName", &d.LabeledAssetName) + delete(rawMsg, key) + case "labelingJobId": + err = unpopulate(val, "LabelingJobID", &d.LabelingJobID) + delete(rawMsg, key) + case "startDateTime": + err = unpopulateTimeRFC3339(val, "StartDateTime", &d.StartDateTime) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type Datastore. 
func (d Datastore) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -4952,6 +6798,7 @@ func (d DatastoreProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "credentials", d.Credentials) objectMap["datastoreType"] = d.DatastoreType populate(objectMap, "description", d.Description) + populate(objectMap, "intellectualProperty", d.IntellectualProperty) populate(objectMap, "isDefault", d.IsDefault) populate(objectMap, "properties", d.Properties) populate(objectMap, "tags", d.Tags) @@ -4976,6 +6823,9 @@ func (d *DatastoreProperties) UnmarshalJSON(data []byte) error { case "description": err = unpopulate(val, "Description", &d.Description) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &d.IntellectualProperty) + delete(rawMsg, key) case "isDefault": err = unpopulate(val, "IsDefault", &d.IsDefault) delete(rawMsg, key) @@ -5141,6 +6991,8 @@ func (d DeploymentResourceConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "instanceCount", d.InstanceCount) populate(objectMap, "instanceType", d.InstanceType) + populate(objectMap, "locations", d.Locations) + populate(objectMap, "maxInstanceCount", d.MaxInstanceCount) populate(objectMap, "properties", d.Properties) return json.Marshal(objectMap) } @@ -5160,6 +7012,12 @@ func (d *DeploymentResourceConfiguration) UnmarshalJSON(data []byte) error { case "instanceType": err = unpopulate(val, "InstanceType", &d.InstanceType) delete(rawMsg, key) + case "locations": + err = unpopulate(val, "Locations", &d.Locations) + delete(rawMsg, key) + case "maxInstanceCount": + err = unpopulate(val, "MaxInstanceCount", &d.MaxInstanceCount) + delete(rawMsg, key) case "properties": err = unpopulate(val, "Properties", &d.Properties) delete(rawMsg, key) @@ -5405,6 +7263,48 @@ func (d *DistributionConfiguration) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type Docker. +func (d Docker) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "privileged", d.Privileged) + if d.AdditionalProperties != nil { + for key, val := range d.AdditionalProperties { + objectMap[key] = val + } + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Docker. +func (d *Docker) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "privileged": + err = unpopulate(val, "Privileged", &d.Privileged) + delete(rawMsg, key) + default: + if d.AdditionalProperties == nil { + d.AdditionalProperties = map[string]any{} + } + if val != nil { + var aux any + err = json.Unmarshal(val, &aux) + d.AdditionalProperties[key] = aux + } + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type EarlyTerminationPolicy. func (e EarlyTerminationPolicy) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -5440,17 +7340,16 @@ func (e *EarlyTerminationPolicy) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type EncryptionKeyVaultProperties. 
-func (e EncryptionKeyVaultProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type EmailMonitoringAlertNotificationSettings. +func (e EmailMonitoringAlertNotificationSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "identityClientId", e.IdentityClientID) - populate(objectMap, "keyIdentifier", e.KeyIdentifier) - populate(objectMap, "keyVaultArmId", e.KeyVaultArmID) + objectMap["alertNotificationType"] = MonitoringAlertNotificationTypeEmail + populate(objectMap, "emailNotificationSetting", e.EmailNotificationSetting) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type EncryptionKeyVaultProperties. -func (e *EncryptionKeyVaultProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type EmailMonitoringAlertNotificationSettings. +func (e *EmailMonitoringAlertNotificationSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", e, err) @@ -5458,15 +7357,39 @@ func (e *EncryptionKeyVaultProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "identityClientId": - err = unpopulate(val, "IdentityClientID", &e.IdentityClientID) + case "alertNotificationType": + err = unpopulate(val, "AlertNotificationType", &e.AlertNotificationType) delete(rawMsg, key) + case "emailNotificationSetting": + err = unpopulate(val, "EmailNotificationSetting", &e.EmailNotificationSetting) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EncryptionKeyVaultUpdateProperties. +func (e EncryptionKeyVaultUpdateProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "keyIdentifier", e.KeyIdentifier) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EncryptionKeyVaultUpdateProperties. +func (e *EncryptionKeyVaultUpdateProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { case "keyIdentifier": err = unpopulate(val, "KeyIdentifier", &e.KeyIdentifier) delete(rawMsg, key) - case "keyVaultArmId": - err = unpopulate(val, "KeyVaultArmID", &e.KeyVaultArmID) - delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", e, err) @@ -5478,9 +7401,12 @@ func (e *EncryptionKeyVaultProperties) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type EncryptionProperty. 
func (e EncryptionProperty) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "cosmosDbResourceId", e.CosmosDbResourceID) populate(objectMap, "identity", e.Identity) populate(objectMap, "keyVaultProperties", e.KeyVaultProperties) + populate(objectMap, "searchAccountResourceId", e.SearchAccountResourceID) populate(objectMap, "status", e.Status) + populate(objectMap, "storageAccountResourceId", e.StorageAccountResourceID) return json.Marshal(objectMap) } @@ -5493,15 +7419,94 @@ func (e *EncryptionProperty) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "cosmosDbResourceId": + err = unpopulate(val, "CosmosDbResourceID", &e.CosmosDbResourceID) + delete(rawMsg, key) case "identity": err = unpopulate(val, "Identity", &e.Identity) delete(rawMsg, key) case "keyVaultProperties": err = unpopulate(val, "KeyVaultProperties", &e.KeyVaultProperties) delete(rawMsg, key) + case "searchAccountResourceId": + err = unpopulate(val, "SearchAccountResourceID", &e.SearchAccountResourceID) + delete(rawMsg, key) case "status": err = unpopulate(val, "Status", &e.Status) delete(rawMsg, key) + case "storageAccountResourceId": + err = unpopulate(val, "StorageAccountResourceID", &e.StorageAccountResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EncryptionUpdateProperties. +func (e EncryptionUpdateProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "keyVaultProperties", e.KeyVaultProperties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EncryptionUpdateProperties. +func (e *EncryptionUpdateProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "keyVaultProperties": + err = unpopulate(val, "KeyVaultProperties", &e.KeyVaultProperties) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Endpoint. +func (e Endpoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "hostIp", e.HostIP) + populate(objectMap, "name", e.Name) + populate(objectMap, "protocol", e.Protocol) + populate(objectMap, "published", e.Published) + populate(objectMap, "target", e.Target) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Endpoint. 
+func (e *Endpoint) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "hostIp": + err = unpopulate(val, "HostIP", &e.HostIP) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &e.Name) + delete(rawMsg, key) + case "protocol": + err = unpopulate(val, "Protocol", &e.Protocol) + delete(rawMsg, key) + case "published": + err = unpopulate(val, "Published", &e.Published) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &e.Target) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", e, err) @@ -5752,6 +7757,7 @@ func (e EnvironmentContainerProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "latestVersion", e.LatestVersion) populate(objectMap, "nextVersion", e.NextVersion) populate(objectMap, "properties", e.Properties) + populate(objectMap, "provisioningState", e.ProvisioningState) populate(objectMap, "tags", e.Tags) return json.Marshal(objectMap) } @@ -5780,6 +7786,9 @@ func (e *EnvironmentContainerProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &e.Properties) delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &e.ProvisioningState) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &e.Tags) delete(rawMsg, key) @@ -5822,6 +7831,52 @@ func (e *EnvironmentContainerResourceArmPaginatedResult) UnmarshalJSON(data []by return nil } +// MarshalJSON implements the json.Marshaller interface for type EnvironmentVariable. +func (e EnvironmentVariable) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "type", e.Type) + populate(objectMap, "value", e.Value) + if e.AdditionalProperties != nil { + for key, val := range e.AdditionalProperties { + objectMap[key] = val + } + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EnvironmentVariable. +func (e *EnvironmentVariable) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "Type", &e.Type) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &e.Value) + delete(rawMsg, key) + default: + if e.AdditionalProperties == nil { + e.AdditionalProperties = map[string]any{} + } + if val != nil { + var aux any + err = json.Unmarshal(val, &aux) + e.AdditionalProperties[key] = aux + } + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type EnvironmentVersion. func (e EnvironmentVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -5868,6 +7923,7 @@ func (e *EnvironmentVersion) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type EnvironmentVersionProperties. 
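Types such as Docker and EnvironmentVariable are open-ended: any JSON key that is not a declared field is preserved in AdditionalProperties on unmarshal and written back out on marshal. A hedged round-trip sketch (module import path and field types are assumptions):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	raw := []byte(`{"type":"local","value":"42","x-custom-flag":true}`)

	var ev armmachinelearning.EnvironmentVariable
	if err := json.Unmarshal(raw, &ev); err != nil {
		panic(err)
	}
	// "x-custom-flag" is not a declared field, so it lands in AdditionalProperties.
	fmt.Println(ev.AdditionalProperties["x-custom-flag"]) // true

	// Marshalling writes the extra key back alongside "type" and "value".
	out, err := json.Marshal(ev)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}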
func (e EnvironmentVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", e.AutoDeleteSetting) populate(objectMap, "autoRebuild", e.AutoRebuild) populate(objectMap, "build", e.Build) populate(objectMap, "condaFile", e.CondaFile) @@ -5875,10 +7931,13 @@ func (e EnvironmentVersionProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "environmentType", e.EnvironmentType) populate(objectMap, "image", e.Image) populate(objectMap, "inferenceConfig", e.InferenceConfig) + populate(objectMap, "intellectualProperty", e.IntellectualProperty) populate(objectMap, "isAnonymous", e.IsAnonymous) populate(objectMap, "isArchived", e.IsArchived) populate(objectMap, "osType", e.OSType) populate(objectMap, "properties", e.Properties) + populate(objectMap, "provisioningState", e.ProvisioningState) + populate(objectMap, "stage", e.Stage) populate(objectMap, "tags", e.Tags) return json.Marshal(objectMap) } @@ -5892,6 +7951,9 @@ func (e *EnvironmentVersionProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &e.AutoDeleteSetting) + delete(rawMsg, key) case "autoRebuild": err = unpopulate(val, "AutoRebuild", &e.AutoRebuild) delete(rawMsg, key) @@ -5913,6 +7975,9 @@ func (e *EnvironmentVersionProperties) UnmarshalJSON(data []byte) error { case "inferenceConfig": err = unpopulate(val, "InferenceConfig", &e.InferenceConfig) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &e.IntellectualProperty) + delete(rawMsg, key) case "isAnonymous": err = unpopulate(val, "IsAnonymous", &e.IsAnonymous) delete(rawMsg, key) @@ -5925,6 +7990,12 @@ func (e *EnvironmentVersionProperties) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &e.Properties) delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &e.ProvisioningState) + delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &e.Stage) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &e.Tags) delete(rawMsg, key) @@ -6138,6 +8209,49 @@ func (e *EstimatedVMPrices) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ExportSummary. +func (e ExportSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeRFC3339(objectMap, "endDateTime", e.EndDateTime) + populate(objectMap, "exportedRowCount", e.ExportedRowCount) + objectMap["format"] = e.Format + populate(objectMap, "labelingJobId", e.LabelingJobID) + populateTimeRFC3339(objectMap, "startDateTime", e.StartDateTime) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ExportSummary. 
+func (e *ExportSummary) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "endDateTime": + err = unpopulateTimeRFC3339(val, "EndDateTime", &e.EndDateTime) + delete(rawMsg, key) + case "exportedRowCount": + err = unpopulate(val, "ExportedRowCount", &e.ExportedRowCount) + delete(rawMsg, key) + case "format": + err = unpopulate(val, "Format", &e.Format) + delete(rawMsg, key) + case "labelingJobId": + err = unpopulate(val, "LabelingJobID", &e.LabelingJobID) + delete(rawMsg, key) + case "startDateTime": + err = unpopulateTimeRFC3339(val, "StartDateTime", &e.StartDateTime) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ExternalFQDNResponse. func (e ExternalFQDNResponse) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -6226,7 +8340,8 @@ func (f *FQDNEndpointDetail) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type FQDNEndpoints. func (f FQDNEndpoints) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "properties", f.Properties) + populate(objectMap, "category", f.Category) + populate(objectMap, "endpoints", f.Endpoints) return json.Marshal(objectMap) } @@ -6239,8 +8354,11 @@ func (f *FQDNEndpoints) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "properties": - err = unpopulate(val, "Properties", &f.Properties) + case "category": + err = unpopulate(val, "Category", &f.Category) + delete(rawMsg, key) + case "endpoints": + err = unpopulate(val, "Endpoints", &f.Endpoints) delete(rawMsg, key) } if err != nil { @@ -6250,16 +8368,15 @@ func (f *FQDNEndpoints) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type FQDNEndpointsProperties. -func (f FQDNEndpointsProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FQDNEndpointsPropertyBag. +func (f FQDNEndpointsPropertyBag) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "category", f.Category) - populate(objectMap, "endpoints", f.Endpoints) + populate(objectMap, "properties", f.Properties) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type FQDNEndpointsProperties. -func (f *FQDNEndpointsProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FQDNEndpointsPropertyBag. 
+func (f *FQDNEndpointsPropertyBag) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6267,11 +8384,8 @@ func (f *FQDNEndpointsProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "category": - err = unpopulate(val, "Category", &f.Category) - delete(rawMsg, key) - case "endpoints": - err = unpopulate(val, "Endpoints", &f.Endpoints) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) } if err != nil { @@ -6281,15 +8395,19 @@ func (f *FQDNEndpointsProperties) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type FeaturizationSettings. -func (f FeaturizationSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Feature. +func (f Feature) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "datasetLanguage", f.DatasetLanguage) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturizationSettings. -func (f *FeaturizationSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Feature. +func (f *Feature) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6297,8 +8415,20 @@ func (f *FeaturizationSettings) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "datasetLanguage": - err = unpopulate(val, "DatasetLanguage", &f.DatasetLanguage) + case "id": + err = unpopulate(val, "ID", &f.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) } if err != nil { @@ -6308,15 +8438,20 @@ func (f *FeaturizationSettings) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type FlavorData. -func (f FlavorData) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureAttributionDriftMonitoringSignal. +func (f FeatureAttributionDriftMonitoringSignal) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "data", f.Data) + populate(objectMap, "metricThreshold", f.MetricThreshold) + populate(objectMap, "mode", f.Mode) + populate(objectMap, "productionData", f.ProductionData) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "referenceData", f.ReferenceData) + objectMap["signalType"] = MonitoringSignalTypeFeatureAttributionDrift return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type FlavorData. 
-func (f *FlavorData) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureAttributionDriftMonitoringSignal. +func (f *FeatureAttributionDriftMonitoringSignal) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6324,8 +8459,23 @@ func (f *FlavorData) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "data": - err = unpopulate(val, "Data", &f.Data) + case "metricThreshold": + err = unpopulate(val, "MetricThreshold", &f.MetricThreshold) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &f.Mode) + delete(rawMsg, key) + case "productionData": + f.ProductionData, err = unmarshalMonitoringInputDataBaseClassificationArray(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "referenceData": + f.ReferenceData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &f.SignalType) delete(rawMsg, key) } if err != nil { @@ -6335,15 +8485,16 @@ func (f *FlavorData) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ForecastHorizon. -func (f ForecastHorizon) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureAttributionMetricThreshold. +func (f FeatureAttributionMetricThreshold) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = f.Mode + populate(objectMap, "metric", f.Metric) + populate(objectMap, "threshold", f.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ForecastHorizon. -func (f *ForecastHorizon) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureAttributionMetricThreshold. +func (f *FeatureAttributionMetricThreshold) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6351,8 +8502,11 @@ func (f *ForecastHorizon) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "mode": - err = unpopulate(val, "Mode", &f.Mode) + case "metric": + err = unpopulate(val, "Metric", &f.Metric) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &f.Threshold) delete(rawMsg, key) } if err != nil { @@ -6362,30 +8516,19 @@ func (f *ForecastHorizon) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type Forecasting. -func (f Forecasting) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureProperties. 
+func (f FeatureProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "cvSplitColumnNames", f.CvSplitColumnNames) - populate(objectMap, "featurizationSettings", f.FeaturizationSettings) - populate(objectMap, "forecastingSettings", f.ForecastingSettings) - populate(objectMap, "limitSettings", f.LimitSettings) - populate(objectMap, "logVerbosity", f.LogVerbosity) - populate(objectMap, "nCrossValidations", f.NCrossValidations) - populate(objectMap, "primaryMetric", f.PrimaryMetric) - populate(objectMap, "targetColumnName", f.TargetColumnName) - objectMap["taskType"] = TaskTypeForecasting - populate(objectMap, "testData", f.TestData) - populate(objectMap, "testDataSize", f.TestDataSize) - populate(objectMap, "trainingData", f.TrainingData) - populate(objectMap, "trainingSettings", f.TrainingSettings) - populate(objectMap, "validationData", f.ValidationData) - populate(objectMap, "validationDataSize", f.ValidationDataSize) - populate(objectMap, "weightColumnName", f.WeightColumnName) + populate(objectMap, "dataType", f.DataType) + populate(objectMap, "description", f.Description) + populate(objectMap, "featureName", f.FeatureName) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "tags", f.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Forecasting. -func (f *Forecasting) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureProperties. +func (f *FeatureProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6393,53 +8536,20 @@ func (f *Forecasting) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "cvSplitColumnNames": - err = unpopulate(val, "CvSplitColumnNames", &f.CvSplitColumnNames) + case "dataType": + err = unpopulate(val, "DataType", &f.DataType) delete(rawMsg, key) - case "featurizationSettings": - err = unpopulate(val, "FeaturizationSettings", &f.FeaturizationSettings) - delete(rawMsg, key) - case "forecastingSettings": - err = unpopulate(val, "ForecastingSettings", &f.ForecastingSettings) - delete(rawMsg, key) - case "limitSettings": - err = unpopulate(val, "LimitSettings", &f.LimitSettings) - delete(rawMsg, key) - case "logVerbosity": - err = unpopulate(val, "LogVerbosity", &f.LogVerbosity) - delete(rawMsg, key) - case "nCrossValidations": - f.NCrossValidations, err = unmarshalNCrossValidationsClassification(val) - delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &f.PrimaryMetric) - delete(rawMsg, key) - case "targetColumnName": - err = unpopulate(val, "TargetColumnName", &f.TargetColumnName) - delete(rawMsg, key) - case "taskType": - err = unpopulate(val, "TaskType", &f.TaskType) - delete(rawMsg, key) - case "testData": - err = unpopulate(val, "TestData", &f.TestData) - delete(rawMsg, key) - case "testDataSize": - err = unpopulate(val, "TestDataSize", &f.TestDataSize) - delete(rawMsg, key) - case "trainingData": - err = unpopulate(val, "TrainingData", &f.TrainingData) - delete(rawMsg, key) - case "trainingSettings": - err = unpopulate(val, "TrainingSettings", &f.TrainingSettings) + case "description": + err = unpopulate(val, "Description", &f.Description) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &f.ValidationData) + case 
"featureName": + err = unpopulate(val, "FeatureName", &f.FeatureName) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &f.ValidationDataSize) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "weightColumnName": - err = unpopulate(val, "WeightColumnName", &f.WeightColumnName) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) delete(rawMsg, key) } if err != nil { @@ -6449,27 +8559,16 @@ func (f *Forecasting) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ForecastingSettings. -func (f ForecastingSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureResourceArmPaginatedResult. +func (f FeatureResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "countryOrRegionForHolidays", f.CountryOrRegionForHolidays) - populate(objectMap, "cvStepSize", f.CvStepSize) - populate(objectMap, "featureLags", f.FeatureLags) - populate(objectMap, "forecastHorizon", f.ForecastHorizon) - populate(objectMap, "frequency", f.Frequency) - populate(objectMap, "seasonality", f.Seasonality) - populate(objectMap, "shortSeriesHandlingConfig", f.ShortSeriesHandlingConfig) - populate(objectMap, "targetAggregateFunction", f.TargetAggregateFunction) - populate(objectMap, "targetLags", f.TargetLags) - populate(objectMap, "targetRollingWindowSize", f.TargetRollingWindowSize) - populate(objectMap, "timeColumnName", f.TimeColumnName) - populate(objectMap, "timeSeriesIdColumnNames", f.TimeSeriesIDColumnNames) - populate(objectMap, "useStl", f.UseStl) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ForecastingSettings. -func (f *ForecastingSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureResourceArmPaginatedResult. 
+func (f *FeatureResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6477,44 +8576,11 @@ func (f *ForecastingSettings) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "countryOrRegionForHolidays": - err = unpopulate(val, "CountryOrRegionForHolidays", &f.CountryOrRegionForHolidays) - delete(rawMsg, key) - case "cvStepSize": - err = unpopulate(val, "CvStepSize", &f.CvStepSize) - delete(rawMsg, key) - case "featureLags": - err = unpopulate(val, "FeatureLags", &f.FeatureLags) - delete(rawMsg, key) - case "forecastHorizon": - f.ForecastHorizon, err = unmarshalForecastHorizonClassification(val) - delete(rawMsg, key) - case "frequency": - err = unpopulate(val, "Frequency", &f.Frequency) - delete(rawMsg, key) - case "seasonality": - f.Seasonality, err = unmarshalSeasonalityClassification(val) - delete(rawMsg, key) - case "shortSeriesHandlingConfig": - err = unpopulate(val, "ShortSeriesHandlingConfig", &f.ShortSeriesHandlingConfig) - delete(rawMsg, key) - case "targetAggregateFunction": - err = unpopulate(val, "TargetAggregateFunction", &f.TargetAggregateFunction) - delete(rawMsg, key) - case "targetLags": - f.TargetLags, err = unmarshalTargetLagsClassification(val) - delete(rawMsg, key) - case "targetRollingWindowSize": - f.TargetRollingWindowSize, err = unmarshalTargetRollingWindowSizeClassification(val) - delete(rawMsg, key) - case "timeColumnName": - err = unpopulate(val, "TimeColumnName", &f.TimeColumnName) - delete(rawMsg, key) - case "timeSeriesIdColumnNames": - err = unpopulate(val, "TimeSeriesIDColumnNames", &f.TimeSeriesIDColumnNames) + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) delete(rawMsg, key) - case "useStl": - err = unpopulate(val, "UseStl", &f.UseStl) + case "value": + err = unpopulate(val, "Value", &f.Value) delete(rawMsg, key) } if err != nil { @@ -6524,23 +8590,17 @@ func (f *ForecastingSettings) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ForecastingTrainingSettings. -func (f ForecastingTrainingSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureStoreSettings. +func (f FeatureStoreSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "allowedTrainingAlgorithms", f.AllowedTrainingAlgorithms) - populate(objectMap, "blockedTrainingAlgorithms", f.BlockedTrainingAlgorithms) - populate(objectMap, "enableDnnTraining", f.EnableDnnTraining) - populate(objectMap, "enableModelExplainability", f.EnableModelExplainability) - populate(objectMap, "enableOnnxCompatibleModels", f.EnableOnnxCompatibleModels) - populate(objectMap, "enableStackEnsemble", f.EnableStackEnsemble) - populate(objectMap, "enableVoteEnsemble", f.EnableVoteEnsemble) - populate(objectMap, "ensembleModelDownloadTimeout", f.EnsembleModelDownloadTimeout) - populate(objectMap, "stackEnsembleSettings", f.StackEnsembleSettings) + populate(objectMap, "computeRuntime", f.ComputeRuntime) + populate(objectMap, "offlineStoreConnectionName", f.OfflineStoreConnectionName) + populate(objectMap, "onlineStoreConnectionName", f.OnlineStoreConnectionName) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ForecastingTrainingSettings. 
-func (f *ForecastingTrainingSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureStoreSettings. +func (f *FeatureStoreSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", f, err) @@ -6548,32 +8608,14 @@ func (f *ForecastingTrainingSettings) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "allowedTrainingAlgorithms": - err = unpopulate(val, "AllowedTrainingAlgorithms", &f.AllowedTrainingAlgorithms) - delete(rawMsg, key) - case "blockedTrainingAlgorithms": - err = unpopulate(val, "BlockedTrainingAlgorithms", &f.BlockedTrainingAlgorithms) - delete(rawMsg, key) - case "enableDnnTraining": - err = unpopulate(val, "EnableDnnTraining", &f.EnableDnnTraining) - delete(rawMsg, key) - case "enableModelExplainability": - err = unpopulate(val, "EnableModelExplainability", &f.EnableModelExplainability) - delete(rawMsg, key) - case "enableOnnxCompatibleModels": - err = unpopulate(val, "EnableOnnxCompatibleModels", &f.EnableOnnxCompatibleModels) + case "computeRuntime": + err = unpopulate(val, "ComputeRuntime", &f.ComputeRuntime) delete(rawMsg, key) - case "enableStackEnsemble": - err = unpopulate(val, "EnableStackEnsemble", &f.EnableStackEnsemble) - delete(rawMsg, key) - case "enableVoteEnsemble": - err = unpopulate(val, "EnableVoteEnsemble", &f.EnableVoteEnsemble) - delete(rawMsg, key) - case "ensembleModelDownloadTimeout": - err = unpopulate(val, "EnsembleModelDownloadTimeout", &f.EnsembleModelDownloadTimeout) + case "offlineStoreConnectionName": + err = unpopulate(val, "OfflineStoreConnectionName", &f.OfflineStoreConnectionName) delete(rawMsg, key) - case "stackEnsembleSettings": - err = unpopulate(val, "StackEnsembleSettings", &f.StackEnsembleSettings) + case "onlineStoreConnectionName": + err = unpopulate(val, "OnlineStoreConnectionName", &f.OnlineStoreConnectionName) delete(rawMsg, key) } if err != nil { @@ -6583,1374 +8625,1584 @@ func (f *ForecastingTrainingSettings) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type GridSamplingAlgorithm. -func (g GridSamplingAlgorithm) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureSubset. +func (f FeatureSubset) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["samplingAlgorithmType"] = SamplingAlgorithmTypeGrid + populate(objectMap, "features", f.Features) + objectMap["filterType"] = MonitoringFeatureFilterTypeFeatureSubset return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type GridSamplingAlgorithm. -func (g *GridSamplingAlgorithm) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureSubset. 
+func (f *FeatureSubset) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", g, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "samplingAlgorithmType": - err = unpopulate(val, "SamplingAlgorithmType", &g.SamplingAlgorithmType) + case "features": + err = unpopulate(val, "Features", &f.Features) + delete(rawMsg, key) + case "filterType": + err = unpopulate(val, "FilterType", &f.FilterType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", g, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type HDInsight. -func (h HDInsight) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeatureWindow. +func (f FeatureWindow) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "computeLocation", h.ComputeLocation) - objectMap["computeType"] = ComputeTypeHDInsight - populateTimeRFC3339(objectMap, "createdOn", h.CreatedOn) - populate(objectMap, "description", h.Description) - populate(objectMap, "disableLocalAuth", h.DisableLocalAuth) - populate(objectMap, "isAttachedCompute", h.IsAttachedCompute) - populateTimeRFC3339(objectMap, "modifiedOn", h.ModifiedOn) - populate(objectMap, "properties", h.Properties) - populate(objectMap, "provisioningErrors", h.ProvisioningErrors) - populate(objectMap, "provisioningState", h.ProvisioningState) - populate(objectMap, "resourceId", h.ResourceID) + populateTimeRFC3339(objectMap, "featureWindowEnd", f.FeatureWindowEnd) + populateTimeRFC3339(objectMap, "featureWindowStart", f.FeatureWindowStart) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type HDInsight. -func (h *HDInsight) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeatureWindow. 
+func (f *FeatureWindow) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", h, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "computeLocation": - err = unpopulate(val, "ComputeLocation", &h.ComputeLocation) - delete(rawMsg, key) - case "computeType": - err = unpopulate(val, "ComputeType", &h.ComputeType) - delete(rawMsg, key) - case "createdOn": - err = unpopulateTimeRFC3339(val, "CreatedOn", &h.CreatedOn) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &h.Description) - delete(rawMsg, key) - case "disableLocalAuth": - err = unpopulate(val, "DisableLocalAuth", &h.DisableLocalAuth) - delete(rawMsg, key) - case "isAttachedCompute": - err = unpopulate(val, "IsAttachedCompute", &h.IsAttachedCompute) - delete(rawMsg, key) - case "modifiedOn": - err = unpopulateTimeRFC3339(val, "ModifiedOn", &h.ModifiedOn) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &h.Properties) - delete(rawMsg, key) - case "provisioningErrors": - err = unpopulate(val, "ProvisioningErrors", &h.ProvisioningErrors) - delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &h.ProvisioningState) + case "featureWindowEnd": + err = unpopulateTimeRFC3339(val, "FeatureWindowEnd", &f.FeatureWindowEnd) delete(rawMsg, key) - case "resourceId": - err = unpopulate(val, "ResourceID", &h.ResourceID) + case "featureWindowStart": + err = unpopulateTimeRFC3339(val, "FeatureWindowStart", &f.FeatureWindowStart) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", h, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type HDInsightProperties. -func (h HDInsightProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetContainer. +func (f FeaturesetContainer) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "address", h.Address) - populate(objectMap, "administratorAccount", h.AdministratorAccount) - populate(objectMap, "sshPort", h.SSHPort) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type HDInsightProperties. -func (h *HDInsightProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetContainer. 
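Timestamp fields such as FeatureWindow's bounds go through the populateTimeRFC3339/unpopulateTimeRFC3339 helpers, so they serialize as RFC 3339 strings rather than with Go's default time encoding. A hedged sketch (import path and pointer-typed fields assumed):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	fw := armmachinelearning.FeatureWindow{
		FeatureWindowStart: to.Ptr(time.Date(2023, 6, 1, 0, 0, 0, 0, time.UTC)),
		FeatureWindowEnd:   to.Ptr(time.Date(2023, 6, 14, 0, 0, 0, 0, time.UTC)),
	}
	b, err := json.Marshal(fw)
	if err != nil {
		panic(err)
	}
	// Prints something like:
	// {"featureWindowEnd":"2023-06-14T00:00:00Z","featureWindowStart":"2023-06-01T00:00:00Z"}
	fmt.Println(string(b))
}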
+func (f *FeaturesetContainer) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", h, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "address": - err = unpopulate(val, "Address", &h.Address) + case "id": + err = unpopulate(val, "ID", &f.ID) delete(rawMsg, key) - case "administratorAccount": - err = unpopulate(val, "AdministratorAccount", &h.AdministratorAccount) + case "name": + err = unpopulate(val, "Name", &f.Name) delete(rawMsg, key) - case "sshPort": - err = unpopulate(val, "SSHPort", &h.SSHPort) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", h, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type HDInsightSchema. -func (h HDInsightSchema) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetContainerProperties. +func (f FeaturesetContainerProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "properties", h.Properties) + populate(objectMap, "description", f.Description) + populate(objectMap, "isArchived", f.IsArchived) + populate(objectMap, "latestVersion", f.LatestVersion) + populate(objectMap, "nextVersion", f.NextVersion) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "provisioningState", f.ProvisioningState) + populate(objectMap, "tags", f.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type HDInsightSchema. -func (h *HDInsightSchema) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetContainerProperties. +func (f *FeaturesetContainerProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", h, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { + case "description": + err = unpopulate(val, "Description", &f.Description) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &f.IsArchived) + delete(rawMsg, key) + case "latestVersion": + err = unpopulate(val, "LatestVersion", &f.LatestVersion) + delete(rawMsg, key) + case "nextVersion": + err = unpopulate(val, "NextVersion", &f.NextVersion) + delete(rawMsg, key) case "properties": - err = unpopulate(val, "Properties", &h.Properties) + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", h, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type IDAssetReference. 
-func (i IDAssetReference) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetContainerResourceArmPaginatedResult. +func (f FeaturesetContainerResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "assetId", i.AssetID) - objectMap["referenceType"] = ReferenceTypeID + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type IDAssetReference. -func (i *IDAssetReference) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetContainerResourceArmPaginatedResult. +func (f *FeaturesetContainerResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "assetId": - err = unpopulate(val, "AssetID", &i.AssetID) + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) delete(rawMsg, key) - case "referenceType": - err = unpopulate(val, "ReferenceType", &i.ReferenceType) + case "value": + err = unpopulate(val, "Value", &f.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type IdentityConfiguration. -func (i IdentityConfiguration) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetJob. +func (f FeaturesetJob) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["identityType"] = i.IdentityType + populateTimeRFC3339(objectMap, "createdDate", f.CreatedDate) + populate(objectMap, "displayName", f.DisplayName) + populate(objectMap, "duration", f.Duration) + populate(objectMap, "experimentId", f.ExperimentID) + populate(objectMap, "featureWindow", f.FeatureWindow) + populate(objectMap, "jobId", f.JobID) + populate(objectMap, "status", f.Status) + populate(objectMap, "tags", f.Tags) + populate(objectMap, "type", f.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type IdentityConfiguration. -func (i *IdentityConfiguration) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetJob. 
+func (f *FeaturesetJob) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "identityType": - err = unpopulate(val, "IdentityType", &i.IdentityType) + case "createdDate": + err = unpopulateTimeRFC3339(val, "CreatedDate", &f.CreatedDate) + delete(rawMsg, key) + case "displayName": + err = unpopulate(val, "DisplayName", &f.DisplayName) + delete(rawMsg, key) + case "duration": + err = unpopulate(val, "Duration", &f.Duration) + delete(rawMsg, key) + case "experimentId": + err = unpopulate(val, "ExperimentID", &f.ExperimentID) + delete(rawMsg, key) + case "featureWindow": + err = unpopulate(val, "FeatureWindow", &f.FeatureWindow) + delete(rawMsg, key) + case "jobId": + err = unpopulate(val, "JobID", &f.JobID) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &f.Status) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type IdentityForCmk. -func (i IdentityForCmk) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetJobArmPaginatedResult. +func (f FeaturesetJobArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "userAssignedIdentity", i.UserAssignedIdentity) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type IdentityForCmk. -func (i *IdentityForCmk) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetJobArmPaginatedResult. +func (f *FeaturesetJobArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "userAssignedIdentity": - err = unpopulate(val, "UserAssignedIdentity", &i.UserAssignedIdentity) + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &f.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageClassification. -func (i ImageClassification) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetSpecification. 
+func (f FeaturesetSpecification) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "logVerbosity", i.LogVerbosity) - populate(objectMap, "modelSettings", i.ModelSettings) - populate(objectMap, "primaryMetric", i.PrimaryMetric) - populate(objectMap, "searchSpace", i.SearchSpace) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "targetColumnName", i.TargetColumnName) - objectMap["taskType"] = TaskTypeImageClassification - populate(objectMap, "trainingData", i.TrainingData) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + populate(objectMap, "path", f.Path) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageClassification. -func (i *ImageClassification) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetSpecification. +func (f *FeaturesetSpecification) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) - delete(rawMsg, key) - case "logVerbosity": - err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) - delete(rawMsg, key) - case "modelSettings": - err = unpopulate(val, "ModelSettings", &i.ModelSettings) - delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) - delete(rawMsg, key) - case "searchSpace": - err = unpopulate(val, "SearchSpace", &i.SearchSpace) - delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) - delete(rawMsg, key) - case "targetColumnName": - err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) - delete(rawMsg, key) - case "taskType": - err = unpopulate(val, "TaskType", &i.TaskType) - delete(rawMsg, key) - case "trainingData": - err = unpopulate(val, "TrainingData", &i.TrainingData) - delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) - delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "path": + err = unpopulate(val, "Path", &f.Path) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageClassificationBase. -func (i ImageClassificationBase) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetVersion. 
+func (f FeaturesetVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "modelSettings", i.ModelSettings) - populate(objectMap, "searchSpace", i.SearchSpace) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageClassificationBase. -func (i *ImageClassificationBase) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetVersion. +func (f *FeaturesetVersion) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) - delete(rawMsg, key) - case "modelSettings": - err = unpopulate(val, "ModelSettings", &i.ModelSettings) + case "id": + err = unpopulate(val, "ID", &f.ID) delete(rawMsg, key) - case "searchSpace": - err = unpopulate(val, "SearchSpace", &i.SearchSpace) + case "name": + err = unpopulate(val, "Name", &f.Name) delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageClassificationMultilabel. -func (i ImageClassificationMultilabel) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetVersionBackfillRequest. 
+func (f FeaturesetVersionBackfillRequest) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "logVerbosity", i.LogVerbosity) - populate(objectMap, "modelSettings", i.ModelSettings) - populate(objectMap, "primaryMetric", i.PrimaryMetric) - populate(objectMap, "searchSpace", i.SearchSpace) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "targetColumnName", i.TargetColumnName) - objectMap["taskType"] = TaskTypeImageClassificationMultilabel - populate(objectMap, "trainingData", i.TrainingData) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + populate(objectMap, "description", f.Description) + populate(objectMap, "displayName", f.DisplayName) + populate(objectMap, "featureWindow", f.FeatureWindow) + populate(objectMap, "resource", f.Resource) + populate(objectMap, "sparkConfiguration", f.SparkConfiguration) + populate(objectMap, "tags", f.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageClassificationMultilabel. -func (i *ImageClassificationMultilabel) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetVersionBackfillRequest. +func (f *FeaturesetVersionBackfillRequest) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) - delete(rawMsg, key) - case "logVerbosity": - err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) - delete(rawMsg, key) - case "modelSettings": - err = unpopulate(val, "ModelSettings", &i.ModelSettings) - delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) - delete(rawMsg, key) - case "searchSpace": - err = unpopulate(val, "SearchSpace", &i.SearchSpace) - delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) + case "description": + err = unpopulate(val, "Description", &f.Description) delete(rawMsg, key) - case "targetColumnName": - err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) + case "displayName": + err = unpopulate(val, "DisplayName", &f.DisplayName) delete(rawMsg, key) - case "taskType": - err = unpopulate(val, "TaskType", &i.TaskType) + case "featureWindow": + err = unpopulate(val, "FeatureWindow", &f.FeatureWindow) delete(rawMsg, key) - case "trainingData": - err = unpopulate(val, "TrainingData", &i.TrainingData) + case "resource": + err = unpopulate(val, "Resource", &f.Resource) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) + case "sparkConfiguration": + err = unpopulate(val, "SparkConfiguration", &f.SparkConfiguration) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type 
ImageInstanceSegmentation. -func (i ImageInstanceSegmentation) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetVersionProperties. +func (f FeaturesetVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "logVerbosity", i.LogVerbosity) - populate(objectMap, "modelSettings", i.ModelSettings) - populate(objectMap, "primaryMetric", i.PrimaryMetric) - populate(objectMap, "searchSpace", i.SearchSpace) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "targetColumnName", i.TargetColumnName) - objectMap["taskType"] = TaskTypeImageInstanceSegmentation - populate(objectMap, "trainingData", i.TrainingData) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + populate(objectMap, "autoDeleteSetting", f.AutoDeleteSetting) + populate(objectMap, "description", f.Description) + populate(objectMap, "entities", f.Entities) + populate(objectMap, "isAnonymous", f.IsAnonymous) + populate(objectMap, "isArchived", f.IsArchived) + populate(objectMap, "materializationSettings", f.MaterializationSettings) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "provisioningState", f.ProvisioningState) + populate(objectMap, "specification", f.Specification) + populate(objectMap, "stage", f.Stage) + populate(objectMap, "tags", f.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageInstanceSegmentation. -func (i *ImageInstanceSegmentation) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetVersionProperties. 
+func (f *FeaturesetVersionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &f.AutoDeleteSetting) delete(rawMsg, key) - case "logVerbosity": - err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) + case "description": + err = unpopulate(val, "Description", &f.Description) delete(rawMsg, key) - case "modelSettings": - err = unpopulate(val, "ModelSettings", &i.ModelSettings) + case "entities": + err = unpopulate(val, "Entities", &f.Entities) delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) + case "isAnonymous": + err = unpopulate(val, "IsAnonymous", &f.IsAnonymous) delete(rawMsg, key) - case "searchSpace": - err = unpopulate(val, "SearchSpace", &i.SearchSpace) + case "isArchived": + err = unpopulate(val, "IsArchived", &f.IsArchived) delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) + case "materializationSettings": + err = unpopulate(val, "MaterializationSettings", &f.MaterializationSettings) delete(rawMsg, key) - case "targetColumnName": - err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "taskType": - err = unpopulate(val, "TaskType", &i.TaskType) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) delete(rawMsg, key) - case "trainingData": - err = unpopulate(val, "TrainingData", &i.TrainingData) + case "specification": + err = unpopulate(val, "Specification", &f.Specification) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) + case "stage": + err = unpopulate(val, "Stage", &f.Stage) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageLimitSettings. -func (i ImageLimitSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturesetVersionResourceArmPaginatedResult. +func (f FeaturesetVersionResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "maxConcurrentTrials", i.MaxConcurrentTrials) - populate(objectMap, "maxTrials", i.MaxTrials) - populate(objectMap, "timeout", i.Timeout) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageLimitSettings. -func (i *ImageLimitSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturesetVersionResourceArmPaginatedResult. 
+func (f *FeaturesetVersionResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "maxConcurrentTrials": - err = unpopulate(val, "MaxConcurrentTrials", &i.MaxConcurrentTrials) - delete(rawMsg, key) - case "maxTrials": - err = unpopulate(val, "MaxTrials", &i.MaxTrials) + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) delete(rawMsg, key) - case "timeout": - err = unpopulate(val, "Timeout", &i.Timeout) + case "value": + err = unpopulate(val, "Value", &f.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageModelDistributionSettings. -func (i ImageModelDistributionSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturestoreEntityContainer. +func (f FeaturestoreEntityContainer) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "amsGradient", i.AmsGradient) - populate(objectMap, "augmentations", i.Augmentations) - populate(objectMap, "beta1", i.Beta1) - populate(objectMap, "beta2", i.Beta2) - populate(objectMap, "distributed", i.Distributed) - populate(objectMap, "earlyStopping", i.EarlyStopping) - populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) - populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) - populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) - populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) - populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) - populate(objectMap, "layersToFreeze", i.LayersToFreeze) - populate(objectMap, "learningRate", i.LearningRate) - populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) - populate(objectMap, "modelName", i.ModelName) - populate(objectMap, "momentum", i.Momentum) - populate(objectMap, "nesterov", i.Nesterov) - populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) - populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) - populate(objectMap, "optimizer", i.Optimizer) - populate(objectMap, "randomSeed", i.RandomSeed) - populate(objectMap, "stepLRGamma", i.StepLRGamma) - populate(objectMap, "stepLRStepSize", i.StepLRStepSize) - populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) - populate(objectMap, "validationBatchSize", i.ValidationBatchSize) - populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) - populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) - populate(objectMap, "weightDecay", i.WeightDecay) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelDistributionSettings. -func (i *ImageModelDistributionSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturestoreEntityContainer. 
+func (f *FeaturestoreEntityContainer) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "amsGradient": - err = unpopulate(val, "AmsGradient", &i.AmsGradient) + case "id": + err = unpopulate(val, "ID", &f.ID) delete(rawMsg, key) - case "augmentations": - err = unpopulate(val, "Augmentations", &i.Augmentations) + case "name": + err = unpopulate(val, "Name", &f.Name) delete(rawMsg, key) - case "beta1": - err = unpopulate(val, "Beta1", &i.Beta1) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "beta2": - err = unpopulate(val, "Beta2", &i.Beta2) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) delete(rawMsg, key) - case "distributed": - err = unpopulate(val, "Distributed", &i.Distributed) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) - case "earlyStopping": - err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FeaturestoreEntityContainerProperties. +func (f FeaturestoreEntityContainerProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", f.Description) + populate(objectMap, "isArchived", f.IsArchived) + populate(objectMap, "latestVersion", f.LatestVersion) + populate(objectMap, "nextVersion", f.NextVersion) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "provisioningState", f.ProvisioningState) + populate(objectMap, "tags", f.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturestoreEntityContainerProperties. 
+func (f *FeaturestoreEntityContainerProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &f.Description) delete(rawMsg, key) - case "earlyStoppingDelay": - err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + case "isArchived": + err = unpopulate(val, "IsArchived", &f.IsArchived) delete(rawMsg, key) - case "earlyStoppingPatience": - err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + case "latestVersion": + err = unpopulate(val, "LatestVersion", &f.LatestVersion) delete(rawMsg, key) - case "enableOnnxNormalization": - err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + case "nextVersion": + err = unpopulate(val, "NextVersion", &f.NextVersion) delete(rawMsg, key) - case "evaluationFrequency": - err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "gradientAccumulationStep": - err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) delete(rawMsg, key) - case "layersToFreeze": - err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) delete(rawMsg, key) - case "learningRate": - err = unpopulate(val, "LearningRate", &i.LearningRate) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FeaturestoreEntityContainerResourceArmPaginatedResult. +func (f FeaturestoreEntityContainerResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturestoreEntityContainerResourceArmPaginatedResult. +func (f *FeaturestoreEntityContainerResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) delete(rawMsg, key) - case "learningRateScheduler": - err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + case "value": + err = unpopulate(val, "Value", &f.Value) delete(rawMsg, key) - case "modelName": - err = unpopulate(val, "ModelName", &i.ModelName) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FeaturestoreEntityVersion. 
+func (f FeaturestoreEntityVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturestoreEntityVersion. +func (f *FeaturestoreEntityVersion) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &f.ID) delete(rawMsg, key) - case "momentum": - err = unpopulate(val, "Momentum", &i.Momentum) + case "name": + err = unpopulate(val, "Name", &f.Name) delete(rawMsg, key) - case "nesterov": - err = unpopulate(val, "Nesterov", &i.Nesterov) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "numberOfEpochs": - err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) delete(rawMsg, key) - case "numberOfWorkers": - err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) - case "optimizer": - err = unpopulate(val, "Optimizer", &i.Optimizer) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FeaturestoreEntityVersionProperties. +func (f FeaturestoreEntityVersionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", f.AutoDeleteSetting) + populate(objectMap, "description", f.Description) + populate(objectMap, "indexColumns", f.IndexColumns) + populate(objectMap, "isAnonymous", f.IsAnonymous) + populate(objectMap, "isArchived", f.IsArchived) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "provisioningState", f.ProvisioningState) + populate(objectMap, "stage", f.Stage) + populate(objectMap, "tags", f.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturestoreEntityVersionProperties. 
+func (f *FeaturestoreEntityVersionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &f.AutoDeleteSetting) delete(rawMsg, key) - case "randomSeed": - err = unpopulate(val, "RandomSeed", &i.RandomSeed) + case "description": + err = unpopulate(val, "Description", &f.Description) delete(rawMsg, key) - case "stepLRGamma": - err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + case "indexColumns": + err = unpopulate(val, "IndexColumns", &f.IndexColumns) delete(rawMsg, key) - case "stepLRStepSize": - err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + case "isAnonymous": + err = unpopulate(val, "IsAnonymous", &f.IsAnonymous) delete(rawMsg, key) - case "trainingBatchSize": - err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + case "isArchived": + err = unpopulate(val, "IsArchived", &f.IsArchived) delete(rawMsg, key) - case "validationBatchSize": - err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) delete(rawMsg, key) - case "warmupCosineLRCycles": - err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) delete(rawMsg, key) - case "warmupCosineLRWarmupEpochs": - err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + case "stage": + err = unpopulate(val, "Stage", &f.Stage) delete(rawMsg, key) - case "weightDecay": - err = unpopulate(val, "WeightDecay", &i.WeightDecay) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageModelDistributionSettingsClassification. -func (i ImageModelDistributionSettingsClassification) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type FeaturestoreEntityVersionResourceArmPaginatedResult. 
+func (f FeaturestoreEntityVersionResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "amsGradient", i.AmsGradient) - populate(objectMap, "augmentations", i.Augmentations) - populate(objectMap, "beta1", i.Beta1) - populate(objectMap, "beta2", i.Beta2) - populate(objectMap, "distributed", i.Distributed) - populate(objectMap, "earlyStopping", i.EarlyStopping) - populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) - populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) - populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) - populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) - populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) - populate(objectMap, "layersToFreeze", i.LayersToFreeze) - populate(objectMap, "learningRate", i.LearningRate) - populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) - populate(objectMap, "modelName", i.ModelName) - populate(objectMap, "momentum", i.Momentum) - populate(objectMap, "nesterov", i.Nesterov) - populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) - populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) - populate(objectMap, "optimizer", i.Optimizer) - populate(objectMap, "randomSeed", i.RandomSeed) - populate(objectMap, "stepLRGamma", i.StepLRGamma) - populate(objectMap, "stepLRStepSize", i.StepLRStepSize) - populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) - populate(objectMap, "trainingCropSize", i.TrainingCropSize) - populate(objectMap, "validationBatchSize", i.ValidationBatchSize) - populate(objectMap, "validationCropSize", i.ValidationCropSize) - populate(objectMap, "validationResizeSize", i.ValidationResizeSize) - populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) - populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) - populate(objectMap, "weightDecay", i.WeightDecay) - populate(objectMap, "weightedLoss", i.WeightedLoss) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelDistributionSettingsClassification. -func (i *ImageModelDistributionSettingsClassification) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturestoreEntityVersionResourceArmPaginatedResult. 
+func (f *FeaturestoreEntityVersionResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "amsGradient": - err = unpopulate(val, "AmsGradient", &i.AmsGradient) - delete(rawMsg, key) - case "augmentations": - err = unpopulate(val, "Augmentations", &i.Augmentations) - delete(rawMsg, key) - case "beta1": - err = unpopulate(val, "Beta1", &i.Beta1) - delete(rawMsg, key) - case "beta2": - err = unpopulate(val, "Beta2", &i.Beta2) - delete(rawMsg, key) - case "distributed": - err = unpopulate(val, "Distributed", &i.Distributed) + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) delete(rawMsg, key) - case "earlyStopping": - err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + case "value": + err = unpopulate(val, "Value", &f.Value) delete(rawMsg, key) - case "earlyStoppingDelay": - err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FeaturizationSettings. +func (f FeaturizationSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "datasetLanguage", f.DatasetLanguage) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FeaturizationSettings. +func (f *FeaturizationSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "datasetLanguage": + err = unpopulate(val, "DatasetLanguage", &f.DatasetLanguage) delete(rawMsg, key) - case "earlyStoppingPatience": - err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FileSystemSource. +func (f FileSystemSource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "connection", f.Connection) + populate(objectMap, "path", f.Path) + objectMap["sourceType"] = DataImportSourceTypeFileSystem + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FileSystemSource. 
+func (f *FileSystemSource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "connection": + err = unpopulate(val, "Connection", &f.Connection) delete(rawMsg, key) - case "enableOnnxNormalization": - err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + case "path": + err = unpopulate(val, "Path", &f.Path) delete(rawMsg, key) - case "evaluationFrequency": - err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + case "sourceType": + err = unpopulate(val, "SourceType", &f.SourceType) delete(rawMsg, key) - case "gradientAccumulationStep": - err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FixedInputData. +func (f FixedInputData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "columns", f.Columns) + populate(objectMap, "dataContext", f.DataContext) + objectMap["inputDataType"] = MonitoringInputDataTypeFixed + populate(objectMap, "jobInputType", f.JobInputType) + populate(objectMap, "uri", f.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FixedInputData. +func (f *FixedInputData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "columns": + err = unpopulate(val, "Columns", &f.Columns) delete(rawMsg, key) - case "layersToFreeze": - err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + case "dataContext": + err = unpopulate(val, "DataContext", &f.DataContext) delete(rawMsg, key) - case "learningRate": - err = unpopulate(val, "LearningRate", &i.LearningRate) + case "inputDataType": + err = unpopulate(val, "InputDataType", &f.InputDataType) delete(rawMsg, key) - case "learningRateScheduler": - err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + case "jobInputType": + err = unpopulate(val, "JobInputType", &f.JobInputType) delete(rawMsg, key) - case "modelName": - err = unpopulate(val, "ModelName", &i.ModelName) + case "uri": + err = unpopulate(val, "URI", &f.URI) delete(rawMsg, key) - case "momentum": - err = unpopulate(val, "Momentum", &i.Momentum) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FlavorData. +func (f FlavorData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "data", f.Data) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FlavorData. 
+func (f *FlavorData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "data": + err = unpopulate(val, "Data", &f.Data) delete(rawMsg, key) - case "nesterov": - err = unpopulate(val, "Nesterov", &i.Nesterov) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ForecastHorizon. +func (f ForecastHorizon) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mode"] = f.Mode + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ForecastHorizon. +func (f *ForecastHorizon) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &f.Mode) delete(rawMsg, key) - case "numberOfEpochs": - err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Forecasting. +func (f Forecasting) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "cvSplitColumnNames", f.CvSplitColumnNames) + populate(objectMap, "featurizationSettings", f.FeaturizationSettings) + populate(objectMap, "fixedParameters", f.FixedParameters) + populate(objectMap, "forecastingSettings", f.ForecastingSettings) + populate(objectMap, "limitSettings", f.LimitSettings) + populate(objectMap, "logVerbosity", f.LogVerbosity) + populate(objectMap, "nCrossValidations", f.NCrossValidations) + populate(objectMap, "primaryMetric", f.PrimaryMetric) + populate(objectMap, "searchSpace", f.SearchSpace) + populate(objectMap, "sweepSettings", f.SweepSettings) + populate(objectMap, "targetColumnName", f.TargetColumnName) + objectMap["taskType"] = TaskTypeForecasting + populate(objectMap, "testData", f.TestData) + populate(objectMap, "testDataSize", f.TestDataSize) + populate(objectMap, "trainingData", f.TrainingData) + populate(objectMap, "trainingSettings", f.TrainingSettings) + populate(objectMap, "validationData", f.ValidationData) + populate(objectMap, "validationDataSize", f.ValidationDataSize) + populate(objectMap, "weightColumnName", f.WeightColumnName) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Forecasting. 
+func (f *Forecasting) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "cvSplitColumnNames": + err = unpopulate(val, "CvSplitColumnNames", &f.CvSplitColumnNames) delete(rawMsg, key) - case "numberOfWorkers": - err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + case "featurizationSettings": + err = unpopulate(val, "FeaturizationSettings", &f.FeaturizationSettings) delete(rawMsg, key) - case "optimizer": - err = unpopulate(val, "Optimizer", &i.Optimizer) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &f.FixedParameters) delete(rawMsg, key) - case "randomSeed": - err = unpopulate(val, "RandomSeed", &i.RandomSeed) + case "forecastingSettings": + err = unpopulate(val, "ForecastingSettings", &f.ForecastingSettings) delete(rawMsg, key) - case "stepLRGamma": - err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + case "limitSettings": + err = unpopulate(val, "LimitSettings", &f.LimitSettings) delete(rawMsg, key) - case "stepLRStepSize": - err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + case "logVerbosity": + err = unpopulate(val, "LogVerbosity", &f.LogVerbosity) delete(rawMsg, key) - case "trainingBatchSize": - err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + case "nCrossValidations": + f.NCrossValidations, err = unmarshalNCrossValidationsClassification(val) delete(rawMsg, key) - case "trainingCropSize": - err = unpopulate(val, "TrainingCropSize", &i.TrainingCropSize) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &f.PrimaryMetric) delete(rawMsg, key) - case "validationBatchSize": - err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &f.SearchSpace) delete(rawMsg, key) - case "validationCropSize": - err = unpopulate(val, "ValidationCropSize", &i.ValidationCropSize) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &f.SweepSettings) delete(rawMsg, key) - case "validationResizeSize": - err = unpopulate(val, "ValidationResizeSize", &i.ValidationResizeSize) + case "targetColumnName": + err = unpopulate(val, "TargetColumnName", &f.TargetColumnName) delete(rawMsg, key) - case "warmupCosineLRCycles": - err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + case "taskType": + err = unpopulate(val, "TaskType", &f.TaskType) delete(rawMsg, key) - case "warmupCosineLRWarmupEpochs": - err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + case "testData": + err = unpopulate(val, "TestData", &f.TestData) delete(rawMsg, key) - case "weightDecay": - err = unpopulate(val, "WeightDecay", &i.WeightDecay) + case "testDataSize": + err = unpopulate(val, "TestDataSize", &f.TestDataSize) delete(rawMsg, key) - case "weightedLoss": - err = unpopulate(val, "WeightedLoss", &i.WeightedLoss) + case "trainingData": + err = unpopulate(val, "TrainingData", &f.TrainingData) + delete(rawMsg, key) + case "trainingSettings": + err = unpopulate(val, "TrainingSettings", &f.TrainingSettings) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &f.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &f.ValidationDataSize) + delete(rawMsg, key) + case "weightColumnName": + err = unpopulate(val, 
"WeightColumnName", &f.WeightColumnName) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageModelDistributionSettingsObjectDetection. -func (i ImageModelDistributionSettingsObjectDetection) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ForecastingSettings. +func (f ForecastingSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "amsGradient", i.AmsGradient) - populate(objectMap, "augmentations", i.Augmentations) - populate(objectMap, "beta1", i.Beta1) - populate(objectMap, "beta2", i.Beta2) - populate(objectMap, "boxDetectionsPerImage", i.BoxDetectionsPerImage) - populate(objectMap, "boxScoreThreshold", i.BoxScoreThreshold) - populate(objectMap, "distributed", i.Distributed) - populate(objectMap, "earlyStopping", i.EarlyStopping) - populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) - populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) - populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) - populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) - populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) - populate(objectMap, "imageSize", i.ImageSize) - populate(objectMap, "layersToFreeze", i.LayersToFreeze) - populate(objectMap, "learningRate", i.LearningRate) - populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) - populate(objectMap, "maxSize", i.MaxSize) - populate(objectMap, "minSize", i.MinSize) - populate(objectMap, "modelName", i.ModelName) - populate(objectMap, "modelSize", i.ModelSize) - populate(objectMap, "momentum", i.Momentum) - populate(objectMap, "multiScale", i.MultiScale) - populate(objectMap, "nesterov", i.Nesterov) - populate(objectMap, "nmsIouThreshold", i.NmsIouThreshold) - populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) - populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) - populate(objectMap, "optimizer", i.Optimizer) - populate(objectMap, "randomSeed", i.RandomSeed) - populate(objectMap, "stepLRGamma", i.StepLRGamma) - populate(objectMap, "stepLRStepSize", i.StepLRStepSize) - populate(objectMap, "tileGridSize", i.TileGridSize) - populate(objectMap, "tileOverlapRatio", i.TileOverlapRatio) - populate(objectMap, "tilePredictionsNmsThreshold", i.TilePredictionsNmsThreshold) - populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) - populate(objectMap, "validationBatchSize", i.ValidationBatchSize) - populate(objectMap, "validationIouThreshold", i.ValidationIouThreshold) - populate(objectMap, "validationMetricType", i.ValidationMetricType) - populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) - populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) - populate(objectMap, "weightDecay", i.WeightDecay) + populate(objectMap, "countryOrRegionForHolidays", f.CountryOrRegionForHolidays) + populate(objectMap, "cvStepSize", f.CvStepSize) + populate(objectMap, "featureLags", f.FeatureLags) + populate(objectMap, "featuresUnknownAtForecastTime", f.FeaturesUnknownAtForecastTime) + populate(objectMap, "forecastHorizon", f.ForecastHorizon) + populate(objectMap, "frequency", f.Frequency) + populate(objectMap, "seasonality", f.Seasonality) + populate(objectMap, "shortSeriesHandlingConfig", f.ShortSeriesHandlingConfig) + populate(objectMap, 
"targetAggregateFunction", f.TargetAggregateFunction) + populate(objectMap, "targetLags", f.TargetLags) + populate(objectMap, "targetRollingWindowSize", f.TargetRollingWindowSize) + populate(objectMap, "timeColumnName", f.TimeColumnName) + populate(objectMap, "timeSeriesIdColumnNames", f.TimeSeriesIDColumnNames) + populate(objectMap, "useStl", f.UseStl) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelDistributionSettingsObjectDetection. -func (i *ImageModelDistributionSettingsObjectDetection) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ForecastingSettings. +func (f *ForecastingSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } for key, val := range rawMsg { var err error switch key { - case "amsGradient": - err = unpopulate(val, "AmsGradient", &i.AmsGradient) - delete(rawMsg, key) - case "augmentations": - err = unpopulate(val, "Augmentations", &i.Augmentations) - delete(rawMsg, key) - case "beta1": - err = unpopulate(val, "Beta1", &i.Beta1) - delete(rawMsg, key) - case "beta2": - err = unpopulate(val, "Beta2", &i.Beta2) - delete(rawMsg, key) - case "boxDetectionsPerImage": - err = unpopulate(val, "BoxDetectionsPerImage", &i.BoxDetectionsPerImage) - delete(rawMsg, key) - case "boxScoreThreshold": - err = unpopulate(val, "BoxScoreThreshold", &i.BoxScoreThreshold) - delete(rawMsg, key) - case "distributed": - err = unpopulate(val, "Distributed", &i.Distributed) - delete(rawMsg, key) - case "earlyStopping": - err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) - delete(rawMsg, key) - case "earlyStoppingDelay": - err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) - delete(rawMsg, key) - case "earlyStoppingPatience": - err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) - delete(rawMsg, key) - case "enableOnnxNormalization": - err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) - delete(rawMsg, key) - case "evaluationFrequency": - err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) - delete(rawMsg, key) - case "gradientAccumulationStep": - err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) - delete(rawMsg, key) - case "imageSize": - err = unpopulate(val, "ImageSize", &i.ImageSize) + case "countryOrRegionForHolidays": + err = unpopulate(val, "CountryOrRegionForHolidays", &f.CountryOrRegionForHolidays) delete(rawMsg, key) - case "layersToFreeze": - err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + case "cvStepSize": + err = unpopulate(val, "CvStepSize", &f.CvStepSize) delete(rawMsg, key) - case "learningRate": - err = unpopulate(val, "LearningRate", &i.LearningRate) + case "featureLags": + err = unpopulate(val, "FeatureLags", &f.FeatureLags) delete(rawMsg, key) - case "learningRateScheduler": - err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + case "featuresUnknownAtForecastTime": + err = unpopulate(val, "FeaturesUnknownAtForecastTime", &f.FeaturesUnknownAtForecastTime) delete(rawMsg, key) - case "maxSize": - err = unpopulate(val, "MaxSize", &i.MaxSize) + case "forecastHorizon": + f.ForecastHorizon, err = unmarshalForecastHorizonClassification(val) delete(rawMsg, key) - case "minSize": - err = unpopulate(val, "MinSize", 
&i.MinSize) + case "frequency": + err = unpopulate(val, "Frequency", &f.Frequency) delete(rawMsg, key) - case "modelName": - err = unpopulate(val, "ModelName", &i.ModelName) + case "seasonality": + f.Seasonality, err = unmarshalSeasonalityClassification(val) delete(rawMsg, key) - case "modelSize": - err = unpopulate(val, "ModelSize", &i.ModelSize) + case "shortSeriesHandlingConfig": + err = unpopulate(val, "ShortSeriesHandlingConfig", &f.ShortSeriesHandlingConfig) delete(rawMsg, key) - case "momentum": - err = unpopulate(val, "Momentum", &i.Momentum) + case "targetAggregateFunction": + err = unpopulate(val, "TargetAggregateFunction", &f.TargetAggregateFunction) delete(rawMsg, key) - case "multiScale": - err = unpopulate(val, "MultiScale", &i.MultiScale) + case "targetLags": + f.TargetLags, err = unmarshalTargetLagsClassification(val) delete(rawMsg, key) - case "nesterov": - err = unpopulate(val, "Nesterov", &i.Nesterov) + case "targetRollingWindowSize": + f.TargetRollingWindowSize, err = unmarshalTargetRollingWindowSizeClassification(val) delete(rawMsg, key) - case "nmsIouThreshold": - err = unpopulate(val, "NmsIouThreshold", &i.NmsIouThreshold) + case "timeColumnName": + err = unpopulate(val, "TimeColumnName", &f.TimeColumnName) delete(rawMsg, key) - case "numberOfEpochs": - err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + case "timeSeriesIdColumnNames": + err = unpopulate(val, "TimeSeriesIDColumnNames", &f.TimeSeriesIDColumnNames) delete(rawMsg, key) - case "numberOfWorkers": - err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + case "useStl": + err = unpopulate(val, "UseStl", &f.UseStl) delete(rawMsg, key) - case "optimizer": - err = unpopulate(val, "Optimizer", &i.Optimizer) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ForecastingTrainingSettings. +func (f ForecastingTrainingSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "allowedTrainingAlgorithms", f.AllowedTrainingAlgorithms) + populate(objectMap, "blockedTrainingAlgorithms", f.BlockedTrainingAlgorithms) + populate(objectMap, "enableDnnTraining", f.EnableDnnTraining) + populate(objectMap, "enableModelExplainability", f.EnableModelExplainability) + populate(objectMap, "enableOnnxCompatibleModels", f.EnableOnnxCompatibleModels) + populate(objectMap, "enableStackEnsemble", f.EnableStackEnsemble) + populate(objectMap, "enableVoteEnsemble", f.EnableVoteEnsemble) + populate(objectMap, "ensembleModelDownloadTimeout", f.EnsembleModelDownloadTimeout) + populate(objectMap, "stackEnsembleSettings", f.StackEnsembleSettings) + populate(objectMap, "trainingMode", f.TrainingMode) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ForecastingTrainingSettings. 
+func (f *ForecastingTrainingSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "allowedTrainingAlgorithms": + err = unpopulate(val, "AllowedTrainingAlgorithms", &f.AllowedTrainingAlgorithms) delete(rawMsg, key) - case "randomSeed": - err = unpopulate(val, "RandomSeed", &i.RandomSeed) + case "blockedTrainingAlgorithms": + err = unpopulate(val, "BlockedTrainingAlgorithms", &f.BlockedTrainingAlgorithms) delete(rawMsg, key) - case "stepLRGamma": - err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + case "enableDnnTraining": + err = unpopulate(val, "EnableDnnTraining", &f.EnableDnnTraining) delete(rawMsg, key) - case "stepLRStepSize": - err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + case "enableModelExplainability": + err = unpopulate(val, "EnableModelExplainability", &f.EnableModelExplainability) delete(rawMsg, key) - case "tileGridSize": - err = unpopulate(val, "TileGridSize", &i.TileGridSize) + case "enableOnnxCompatibleModels": + err = unpopulate(val, "EnableOnnxCompatibleModels", &f.EnableOnnxCompatibleModels) delete(rawMsg, key) - case "tileOverlapRatio": - err = unpopulate(val, "TileOverlapRatio", &i.TileOverlapRatio) + case "enableStackEnsemble": + err = unpopulate(val, "EnableStackEnsemble", &f.EnableStackEnsemble) delete(rawMsg, key) - case "tilePredictionsNmsThreshold": - err = unpopulate(val, "TilePredictionsNmsThreshold", &i.TilePredictionsNmsThreshold) + case "enableVoteEnsemble": + err = unpopulate(val, "EnableVoteEnsemble", &f.EnableVoteEnsemble) delete(rawMsg, key) - case "trainingBatchSize": - err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + case "ensembleModelDownloadTimeout": + err = unpopulate(val, "EnsembleModelDownloadTimeout", &f.EnsembleModelDownloadTimeout) delete(rawMsg, key) - case "validationBatchSize": - err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + case "stackEnsembleSettings": + err = unpopulate(val, "StackEnsembleSettings", &f.StackEnsembleSettings) delete(rawMsg, key) - case "validationIouThreshold": - err = unpopulate(val, "ValidationIouThreshold", &i.ValidationIouThreshold) + case "trainingMode": + err = unpopulate(val, "TrainingMode", &f.TrainingMode) delete(rawMsg, key) - case "validationMetricType": - err = unpopulate(val, "ValidationMetricType", &i.ValidationMetricType) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FqdnOutboundRule. +func (f FqdnOutboundRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "category", f.Category) + populate(objectMap, "destination", f.Destination) + populate(objectMap, "status", f.Status) + objectMap["type"] = RuleTypeFQDN + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FqdnOutboundRule. 
+func (f *FqdnOutboundRule) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "category": + err = unpopulate(val, "Category", &f.Category) delete(rawMsg, key) - case "warmupCosineLRCycles": - err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + case "destination": + err = unpopulate(val, "Destination", &f.Destination) delete(rawMsg, key) - case "warmupCosineLRWarmupEpochs": - err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + case "status": + err = unpopulate(val, "Status", &f.Status) delete(rawMsg, key) - case "weightDecay": - err = unpopulate(val, "WeightDecay", &i.WeightDecay) + case "type": + err = unpopulate(val, "Type", &f.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", f, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageModelSettings. -func (i ImageModelSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type GenerationSafetyQualityMetricThreshold. +func (g GenerationSafetyQualityMetricThreshold) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "advancedSettings", i.AdvancedSettings) - populate(objectMap, "amsGradient", i.AmsGradient) - populate(objectMap, "augmentations", i.Augmentations) - populate(objectMap, "beta1", i.Beta1) - populate(objectMap, "beta2", i.Beta2) - populate(objectMap, "checkpointFrequency", i.CheckpointFrequency) - populate(objectMap, "checkpointModel", i.CheckpointModel) - populate(objectMap, "checkpointRunId", i.CheckpointRunID) - populate(objectMap, "distributed", i.Distributed) - populate(objectMap, "earlyStopping", i.EarlyStopping) - populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) - populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) - populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) - populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) - populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) - populate(objectMap, "layersToFreeze", i.LayersToFreeze) - populate(objectMap, "learningRate", i.LearningRate) - populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) - populate(objectMap, "modelName", i.ModelName) - populate(objectMap, "momentum", i.Momentum) - populate(objectMap, "nesterov", i.Nesterov) - populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) - populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) - populate(objectMap, "optimizer", i.Optimizer) - populate(objectMap, "randomSeed", i.RandomSeed) - populate(objectMap, "stepLRGamma", i.StepLRGamma) - populate(objectMap, "stepLRStepSize", i.StepLRStepSize) - populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) - populate(objectMap, "validationBatchSize", i.ValidationBatchSize) - populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) - populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) - populate(objectMap, "weightDecay", i.WeightDecay) + populate(objectMap, "metric", g.Metric) + populate(objectMap, "threshold", g.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelSettings. 
-func (i *ImageModelSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type GenerationSafetyQualityMetricThreshold. +func (g *GenerationSafetyQualityMetricThreshold) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", g, err) } for key, val := range rawMsg { var err error switch key { - case "advancedSettings": - err = unpopulate(val, "AdvancedSettings", &i.AdvancedSettings) - delete(rawMsg, key) - case "amsGradient": - err = unpopulate(val, "AmsGradient", &i.AmsGradient) - delete(rawMsg, key) - case "augmentations": - err = unpopulate(val, "Augmentations", &i.Augmentations) + case "metric": + err = unpopulate(val, "Metric", &g.Metric) delete(rawMsg, key) - case "beta1": - err = unpopulate(val, "Beta1", &i.Beta1) - delete(rawMsg, key) - case "beta2": - err = unpopulate(val, "Beta2", &i.Beta2) + case "threshold": + err = unpopulate(val, "Threshold", &g.Threshold) delete(rawMsg, key) - case "checkpointFrequency": - err = unpopulate(val, "CheckpointFrequency", &i.CheckpointFrequency) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GenerationSafetyQualityMonitoringSignal. +func (g GenerationSafetyQualityMonitoringSignal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "metricThresholds", g.MetricThresholds) + populate(objectMap, "mode", g.Mode) + populate(objectMap, "productionData", g.ProductionData) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "samplingRate", g.SamplingRate) + objectMap["signalType"] = MonitoringSignalTypeGenerationSafetyQuality + populate(objectMap, "workspaceConnectionId", g.WorkspaceConnectionID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GenerationSafetyQualityMonitoringSignal. 
+func (g *GenerationSafetyQualityMonitoringSignal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "metricThresholds": + err = unpopulate(val, "MetricThresholds", &g.MetricThresholds) delete(rawMsg, key) - case "checkpointModel": - err = unpopulate(val, "CheckpointModel", &i.CheckpointModel) + case "mode": + err = unpopulate(val, "Mode", &g.Mode) delete(rawMsg, key) - case "checkpointRunId": - err = unpopulate(val, "CheckpointRunID", &i.CheckpointRunID) + case "productionData": + g.ProductionData, err = unmarshalMonitoringInputDataBaseClassificationArray(val) delete(rawMsg, key) - case "distributed": - err = unpopulate(val, "Distributed", &i.Distributed) + case "properties": + err = unpopulate(val, "Properties", &g.Properties) delete(rawMsg, key) - case "earlyStopping": - err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + case "samplingRate": + err = unpopulate(val, "SamplingRate", &g.SamplingRate) delete(rawMsg, key) - case "earlyStoppingDelay": - err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + case "signalType": + err = unpopulate(val, "SignalType", &g.SignalType) delete(rawMsg, key) - case "earlyStoppingPatience": - err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + case "workspaceConnectionId": + err = unpopulate(val, "WorkspaceConnectionID", &g.WorkspaceConnectionID) delete(rawMsg, key) - case "enableOnnxNormalization": - err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GenerationTokenStatisticsMetricThreshold. +func (g GenerationTokenStatisticsMetricThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "metric", g.Metric) + populate(objectMap, "threshold", g.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GenerationTokenStatisticsMetricThreshold. +func (g *GenerationTokenStatisticsMetricThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "metric": + err = unpopulate(val, "Metric", &g.Metric) delete(rawMsg, key) - case "evaluationFrequency": - err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + case "threshold": + err = unpopulate(val, "Threshold", &g.Threshold) delete(rawMsg, key) - case "gradientAccumulationStep": - err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GenerationTokenStatisticsSignal. 
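// Aside (illustration only, not part of the generated file): productionData above is a
// polymorphic field, so the generated code defers to a helper such as
// unmarshalMonitoringInputDataBaseClassificationArray, which is defined elsewhere in this
// package. Below is a rough, self-contained stand-in for how such a helper typically routes
// on the wire discriminator; inputData, fixedInput, rollingInput and the "Rolling" value are
// hypothetical names for the sketch, not the SDK's own identifiers.
package main

import (
	"encoding/json"
	"fmt"
)

type inputData interface{ kind() string }

type fixedInput struct {
	InputDataType string `json:"inputDataType"`
	URI           string `json:"uri"`
}

func (f *fixedInput) kind() string { return f.InputDataType }

type rollingInput struct {
	InputDataType string `json:"inputDataType"`
	WindowSize    string `json:"windowSize"`
}

func (r *rollingInput) kind() string { return r.InputDataType }

// unmarshalInputData peeks at the discriminator, then unmarshals into the matching concrete type.
func unmarshalInputData(raw json.RawMessage) (inputData, error) {
	var probe struct {
		InputDataType string `json:"inputDataType"`
	}
	if err := json.Unmarshal(raw, &probe); err != nil {
		return nil, err
	}
	switch probe.InputDataType {
	case "Rolling":
		var r rollingInput
		err := json.Unmarshal(raw, &r)
		return &r, err
	default:
		var f fixedInput
		err := json.Unmarshal(raw, &f)
		return &f, err
	}
}

func main() {
	d, err := unmarshalInputData([]byte(`{"inputDataType":"Rolling","windowSize":"P7D"}`))
	fmt.Println(d.kind(), err) // Rolling <nil>
}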
+func (g GenerationTokenStatisticsSignal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "metricThresholds", g.MetricThresholds) + populate(objectMap, "mode", g.Mode) + populate(objectMap, "productionData", g.ProductionData) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "samplingRate", g.SamplingRate) + objectMap["signalType"] = MonitoringSignalTypeGenerationTokenStatistics + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GenerationTokenStatisticsSignal. +func (g *GenerationTokenStatisticsSignal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "metricThresholds": + err = unpopulate(val, "MetricThresholds", &g.MetricThresholds) delete(rawMsg, key) - case "layersToFreeze": - err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + case "mode": + err = unpopulate(val, "Mode", &g.Mode) delete(rawMsg, key) - case "learningRate": - err = unpopulate(val, "LearningRate", &i.LearningRate) + case "productionData": + g.ProductionData, err = unmarshalMonitoringInputDataBaseClassification(val) delete(rawMsg, key) - case "learningRateScheduler": - err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + case "properties": + err = unpopulate(val, "Properties", &g.Properties) delete(rawMsg, key) - case "modelName": - err = unpopulate(val, "ModelName", &i.ModelName) + case "samplingRate": + err = unpopulate(val, "SamplingRate", &g.SamplingRate) delete(rawMsg, key) - case "momentum": - err = unpopulate(val, "Momentum", &i.Momentum) + case "signalType": + err = unpopulate(val, "SignalType", &g.SignalType) delete(rawMsg, key) - case "nesterov": - err = unpopulate(val, "Nesterov", &i.Nesterov) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GridSamplingAlgorithm. +func (g GridSamplingAlgorithm) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["samplingAlgorithmType"] = SamplingAlgorithmTypeGrid + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GridSamplingAlgorithm. +func (g *GridSamplingAlgorithm) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "samplingAlgorithmType": + err = unpopulate(val, "SamplingAlgorithmType", &g.SamplingAlgorithmType) delete(rawMsg, key) - case "numberOfEpochs": - err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type HDInsight. 
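// Aside (illustration only, not part of the generated file): GridSamplingAlgorithm above
// hard-codes its discriminator with objectMap["samplingAlgorithmType"] = SamplingAlgorithmTypeGrid,
// and its UnmarshalJSON consumes keys out of rawMsg with delete(rawMsg, key). A minimal,
// self-contained sketch of that pattern follows; sampleAlgorithm and the literal "Grid"
// string are hypothetical stand-ins, not the SDK's own identifiers.
package main

import (
	"encoding/json"
	"fmt"
)

type sampleAlgorithm struct {
	Kind string // filled from the wire discriminator on unmarshal
}

// MarshalJSON always writes the fixed discriminator, mirroring the generated marshaller.
func (s sampleAlgorithm) MarshalJSON() ([]byte, error) {
	objectMap := map[string]any{"samplingAlgorithmType": "Grid"}
	return json.Marshal(objectMap)
}

// UnmarshalJSON walks the raw message key by key and deletes each key it consumes,
// the same shape as the generated unmarshallers.
func (s *sampleAlgorithm) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", s, err)
	}
	for key, val := range rawMsg {
		switch key {
		case "samplingAlgorithmType":
			if err := json.Unmarshal(val, &s.Kind); err != nil {
				return fmt.Errorf("unmarshalling type %T: %v", s, err)
			}
			delete(rawMsg, key)
		}
	}
	return nil
}

func main() {
	out, _ := json.Marshal(sampleAlgorithm{})
	fmt.Println(string(out)) // {"samplingAlgorithmType":"Grid"}

	var in sampleAlgorithm
	_ = json.Unmarshal(out, &in)
	fmt.Println(in.Kind) // Grid
}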
+func (h HDInsight) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "computeLocation", h.ComputeLocation) + objectMap["computeType"] = ComputeTypeHDInsight + populateTimeRFC3339(objectMap, "createdOn", h.CreatedOn) + populate(objectMap, "description", h.Description) + populate(objectMap, "disableLocalAuth", h.DisableLocalAuth) + populate(objectMap, "isAttachedCompute", h.IsAttachedCompute) + populateTimeRFC3339(objectMap, "modifiedOn", h.ModifiedOn) + populate(objectMap, "properties", h.Properties) + populate(objectMap, "provisioningErrors", h.ProvisioningErrors) + populate(objectMap, "provisioningState", h.ProvisioningState) + populate(objectMap, "resourceId", h.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type HDInsight. +func (h *HDInsight) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", h, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeLocation": + err = unpopulate(val, "ComputeLocation", &h.ComputeLocation) delete(rawMsg, key) - case "numberOfWorkers": - err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + case "computeType": + err = unpopulate(val, "ComputeType", &h.ComputeType) delete(rawMsg, key) - case "optimizer": - err = unpopulate(val, "Optimizer", &i.Optimizer) + case "createdOn": + err = unpopulateTimeRFC3339(val, "CreatedOn", &h.CreatedOn) delete(rawMsg, key) - case "randomSeed": - err = unpopulate(val, "RandomSeed", &i.RandomSeed) + case "description": + err = unpopulate(val, "Description", &h.Description) delete(rawMsg, key) - case "stepLRGamma": - err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + case "disableLocalAuth": + err = unpopulate(val, "DisableLocalAuth", &h.DisableLocalAuth) delete(rawMsg, key) - case "stepLRStepSize": - err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + case "isAttachedCompute": + err = unpopulate(val, "IsAttachedCompute", &h.IsAttachedCompute) delete(rawMsg, key) - case "trainingBatchSize": - err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + case "modifiedOn": + err = unpopulateTimeRFC3339(val, "ModifiedOn", &h.ModifiedOn) delete(rawMsg, key) - case "validationBatchSize": - err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + case "properties": + err = unpopulate(val, "Properties", &h.Properties) delete(rawMsg, key) - case "warmupCosineLRCycles": - err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + case "provisioningErrors": + err = unpopulate(val, "ProvisioningErrors", &h.ProvisioningErrors) delete(rawMsg, key) - case "warmupCosineLRWarmupEpochs": - err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &h.ProvisioningState) delete(rawMsg, key) - case "weightDecay": - err = unpopulate(val, "WeightDecay", &i.WeightDecay) + case "resourceId": + err = unpopulate(val, "ResourceID", &h.ResourceID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", h, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageModelSettingsClassification. 
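// Aside (illustration only, not part of the generated file): createdOn/modifiedOn above go
// through the populateTimeRFC3339 / unpopulateTimeRFC3339 helpers defined elsewhere in this
// package. A rough, self-contained stand-in for that round-trip using only the standard
// library; rfc3339Time is a hypothetical name and the exact helper semantics are assumed.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type rfc3339Time time.Time

// MarshalJSON renders the timestamp as an RFC 3339 string.
func (t rfc3339Time) MarshalJSON() ([]byte, error) {
	return json.Marshal(time.Time(t).Format(time.RFC3339Nano))
}

// UnmarshalJSON parses an RFC 3339 string back into the wrapper.
func (t *rfc3339Time) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	parsed, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		return err
	}
	*t = rfc3339Time(parsed)
	return nil
}

func main() {
	created := rfc3339Time(time.Date(2023, 6, 14, 12, 0, 0, 0, time.UTC))
	out, _ := json.Marshal(created)
	fmt.Println(string(out)) // "2023-06-14T12:00:00Z"

	var back rfc3339Time
	_ = json.Unmarshal(out, &back)
	fmt.Println(time.Time(back).Equal(time.Time(created))) // true
}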
-func (i ImageModelSettingsClassification) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type HDInsightProperties. +func (h HDInsightProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "advancedSettings", i.AdvancedSettings) - populate(objectMap, "amsGradient", i.AmsGradient) - populate(objectMap, "augmentations", i.Augmentations) - populate(objectMap, "beta1", i.Beta1) - populate(objectMap, "beta2", i.Beta2) - populate(objectMap, "checkpointFrequency", i.CheckpointFrequency) - populate(objectMap, "checkpointModel", i.CheckpointModel) - populate(objectMap, "checkpointRunId", i.CheckpointRunID) - populate(objectMap, "distributed", i.Distributed) - populate(objectMap, "earlyStopping", i.EarlyStopping) - populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) - populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) - populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) - populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) - populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) - populate(objectMap, "layersToFreeze", i.LayersToFreeze) - populate(objectMap, "learningRate", i.LearningRate) - populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) - populate(objectMap, "modelName", i.ModelName) - populate(objectMap, "momentum", i.Momentum) - populate(objectMap, "nesterov", i.Nesterov) - populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) - populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) - populate(objectMap, "optimizer", i.Optimizer) - populate(objectMap, "randomSeed", i.RandomSeed) - populate(objectMap, "stepLRGamma", i.StepLRGamma) - populate(objectMap, "stepLRStepSize", i.StepLRStepSize) - populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) - populate(objectMap, "trainingCropSize", i.TrainingCropSize) - populate(objectMap, "validationBatchSize", i.ValidationBatchSize) - populate(objectMap, "validationCropSize", i.ValidationCropSize) - populate(objectMap, "validationResizeSize", i.ValidationResizeSize) - populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) - populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) - populate(objectMap, "weightDecay", i.WeightDecay) - populate(objectMap, "weightedLoss", i.WeightedLoss) + populate(objectMap, "address", h.Address) + populate(objectMap, "administratorAccount", h.AdministratorAccount) + populate(objectMap, "sshPort", h.SSHPort) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelSettingsClassification. -func (i *ImageModelSettingsClassification) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type HDInsightProperties. 
+func (h *HDInsightProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", h, err) } for key, val := range rawMsg { var err error switch key { - case "advancedSettings": - err = unpopulate(val, "AdvancedSettings", &i.AdvancedSettings) - delete(rawMsg, key) - case "amsGradient": - err = unpopulate(val, "AmsGradient", &i.AmsGradient) - delete(rawMsg, key) - case "augmentations": - err = unpopulate(val, "Augmentations", &i.Augmentations) - delete(rawMsg, key) - case "beta1": - err = unpopulate(val, "Beta1", &i.Beta1) - delete(rawMsg, key) - case "beta2": - err = unpopulate(val, "Beta2", &i.Beta2) - delete(rawMsg, key) - case "checkpointFrequency": - err = unpopulate(val, "CheckpointFrequency", &i.CheckpointFrequency) - delete(rawMsg, key) - case "checkpointModel": - err = unpopulate(val, "CheckpointModel", &i.CheckpointModel) - delete(rawMsg, key) - case "checkpointRunId": - err = unpopulate(val, "CheckpointRunID", &i.CheckpointRunID) - delete(rawMsg, key) - case "distributed": - err = unpopulate(val, "Distributed", &i.Distributed) - delete(rawMsg, key) - case "earlyStopping": - err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) - delete(rawMsg, key) - case "earlyStoppingDelay": - err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) - delete(rawMsg, key) - case "earlyStoppingPatience": - err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) - delete(rawMsg, key) - case "enableOnnxNormalization": - err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) - delete(rawMsg, key) - case "evaluationFrequency": - err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) - delete(rawMsg, key) - case "gradientAccumulationStep": - err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) - delete(rawMsg, key) - case "layersToFreeze": - err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) - delete(rawMsg, key) - case "learningRate": - err = unpopulate(val, "LearningRate", &i.LearningRate) - delete(rawMsg, key) - case "learningRateScheduler": - err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) - delete(rawMsg, key) - case "modelName": - err = unpopulate(val, "ModelName", &i.ModelName) - delete(rawMsg, key) - case "momentum": - err = unpopulate(val, "Momentum", &i.Momentum) - delete(rawMsg, key) - case "nesterov": - err = unpopulate(val, "Nesterov", &i.Nesterov) - delete(rawMsg, key) - case "numberOfEpochs": - err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) - delete(rawMsg, key) - case "numberOfWorkers": - err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + case "address": + err = unpopulate(val, "Address", &h.Address) delete(rawMsg, key) - case "optimizer": - err = unpopulate(val, "Optimizer", &i.Optimizer) + case "administratorAccount": + err = unpopulate(val, "AdministratorAccount", &h.AdministratorAccount) delete(rawMsg, key) - case "randomSeed": - err = unpopulate(val, "RandomSeed", &i.RandomSeed) + case "sshPort": + err = unpopulate(val, "SSHPort", &h.SSHPort) delete(rawMsg, key) - case "stepLRGamma": - err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", h, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type HDInsightSchema. 
+func (h HDInsightSchema) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "properties", h.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type HDInsightSchema. +func (h *HDInsightSchema) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", h, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "properties": + err = unpopulate(val, "Properties", &h.Properties) delete(rawMsg, key) - case "stepLRStepSize": - err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", h, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type HdfsDatastore. +func (h HdfsDatastore) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "credentials", h.Credentials) + objectMap["datastoreType"] = DatastoreTypeHdfs + populate(objectMap, "description", h.Description) + populate(objectMap, "hdfsServerCertificate", h.HdfsServerCertificate) + populate(objectMap, "intellectualProperty", h.IntellectualProperty) + populate(objectMap, "isDefault", h.IsDefault) + populate(objectMap, "nameNodeAddress", h.NameNodeAddress) + populate(objectMap, "properties", h.Properties) + populate(objectMap, "protocol", h.Protocol) + populate(objectMap, "tags", h.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type HdfsDatastore. +func (h *HdfsDatastore) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", h, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "credentials": + h.Credentials, err = unmarshalDatastoreCredentialsClassification(val) delete(rawMsg, key) - case "trainingBatchSize": - err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + case "datastoreType": + err = unpopulate(val, "DatastoreType", &h.DatastoreType) delete(rawMsg, key) - case "trainingCropSize": - err = unpopulate(val, "TrainingCropSize", &i.TrainingCropSize) + case "description": + err = unpopulate(val, "Description", &h.Description) delete(rawMsg, key) - case "validationBatchSize": - err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + case "hdfsServerCertificate": + err = unpopulate(val, "HdfsServerCertificate", &h.HdfsServerCertificate) delete(rawMsg, key) - case "validationCropSize": - err = unpopulate(val, "ValidationCropSize", &i.ValidationCropSize) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &h.IntellectualProperty) delete(rawMsg, key) - case "validationResizeSize": - err = unpopulate(val, "ValidationResizeSize", &i.ValidationResizeSize) + case "isDefault": + err = unpopulate(val, "IsDefault", &h.IsDefault) delete(rawMsg, key) - case "warmupCosineLRCycles": - err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + case "nameNodeAddress": + err = unpopulate(val, "NameNodeAddress", &h.NameNodeAddress) delete(rawMsg, key) - case "warmupCosineLRWarmupEpochs": - err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + case "properties": + err = unpopulate(val, "Properties", &h.Properties) delete(rawMsg, key) - case "weightDecay": - err 
= unpopulate(val, "WeightDecay", &i.WeightDecay) + case "protocol": + err = unpopulate(val, "Protocol", &h.Protocol) delete(rawMsg, key) - case "weightedLoss": - err = unpopulate(val, "WeightedLoss", &i.WeightedLoss) + case "tags": + err = unpopulate(val, "Tags", &h.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", h, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageModelSettingsObjectDetection. -func (i ImageModelSettingsObjectDetection) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type IDAssetReference. +func (i IDAssetReference) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "advancedSettings", i.AdvancedSettings) - populate(objectMap, "amsGradient", i.AmsGradient) - populate(objectMap, "augmentations", i.Augmentations) - populate(objectMap, "beta1", i.Beta1) - populate(objectMap, "beta2", i.Beta2) - populate(objectMap, "boxDetectionsPerImage", i.BoxDetectionsPerImage) - populate(objectMap, "boxScoreThreshold", i.BoxScoreThreshold) - populate(objectMap, "checkpointFrequency", i.CheckpointFrequency) - populate(objectMap, "checkpointModel", i.CheckpointModel) - populate(objectMap, "checkpointRunId", i.CheckpointRunID) - populate(objectMap, "distributed", i.Distributed) - populate(objectMap, "earlyStopping", i.EarlyStopping) - populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) - populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) - populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) - populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) - populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) - populate(objectMap, "imageSize", i.ImageSize) - populate(objectMap, "layersToFreeze", i.LayersToFreeze) - populate(objectMap, "learningRate", i.LearningRate) - populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) - populate(objectMap, "maxSize", i.MaxSize) - populate(objectMap, "minSize", i.MinSize) - populate(objectMap, "modelName", i.ModelName) - populate(objectMap, "modelSize", i.ModelSize) - populate(objectMap, "momentum", i.Momentum) - populate(objectMap, "multiScale", i.MultiScale) - populate(objectMap, "nesterov", i.Nesterov) - populate(objectMap, "nmsIouThreshold", i.NmsIouThreshold) - populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) - populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) - populate(objectMap, "optimizer", i.Optimizer) - populate(objectMap, "randomSeed", i.RandomSeed) - populate(objectMap, "stepLRGamma", i.StepLRGamma) - populate(objectMap, "stepLRStepSize", i.StepLRStepSize) - populate(objectMap, "tileGridSize", i.TileGridSize) - populate(objectMap, "tileOverlapRatio", i.TileOverlapRatio) - populate(objectMap, "tilePredictionsNmsThreshold", i.TilePredictionsNmsThreshold) - populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) - populate(objectMap, "validationBatchSize", i.ValidationBatchSize) - populate(objectMap, "validationIouThreshold", i.ValidationIouThreshold) - populate(objectMap, "validationMetricType", i.ValidationMetricType) - populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) - populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) - populate(objectMap, "weightDecay", i.WeightDecay) + populate(objectMap, "assetId", i.AssetID) + objectMap["referenceType"] = ReferenceTypeID 
return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelSettingsObjectDetection. -func (i *ImageModelSettingsObjectDetection) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type IDAssetReference. +func (i *IDAssetReference) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", i, err) @@ -7958,3740 +10210,9151 @@ func (i *ImageModelSettingsObjectDetection) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "advancedSettings": - err = unpopulate(val, "AdvancedSettings", &i.AdvancedSettings) + case "assetId": + err = unpopulate(val, "AssetID", &i.AssetID) + delete(rawMsg, key) + case "referenceType": + err = unpopulate(val, "ReferenceType", &i.ReferenceType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type IdentityConfiguration. +func (i IdentityConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["identityType"] = i.IdentityType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IdentityConfiguration. +func (i *IdentityConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "identityType": + err = unpopulate(val, "IdentityType", &i.IdentityType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type IdentityForCmk. +func (i IdentityForCmk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "userAssignedIdentity", i.UserAssignedIdentity) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IdentityForCmk. +func (i *IdentityForCmk) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "userAssignedIdentity": + err = unpopulate(val, "UserAssignedIdentity", &i.UserAssignedIdentity) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type IdleShutdownSetting. +func (i IdleShutdownSetting) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "idleTimeBeforeShutdown", i.IdleTimeBeforeShutdown) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IdleShutdownSetting. 
+func (i *IdleShutdownSetting) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "idleTimeBeforeShutdown": + err = unpopulate(val, "IdleTimeBeforeShutdown", &i.IdleTimeBeforeShutdown) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Image. +func (i Image) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "reference", i.Reference) + populate(objectMap, "type", i.Type) + if i.AdditionalProperties != nil { + for key, val := range i.AdditionalProperties { + objectMap[key] = val + } + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Image. +func (i *Image) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "reference": + err = unpopulate(val, "Reference", &i.Reference) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &i.Type) + delete(rawMsg, key) + default: + if i.AdditionalProperties == nil { + i.AdditionalProperties = map[string]any{} + } + if val != nil { + var aux any + err = json.Unmarshal(val, &aux) + i.AdditionalProperties[key] = aux + } + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageClassification. +func (i ImageClassification) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "logVerbosity", i.LogVerbosity) + populate(objectMap, "modelSettings", i.ModelSettings) + populate(objectMap, "primaryMetric", i.PrimaryMetric) + populate(objectMap, "searchSpace", i.SearchSpace) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "targetColumnName", i.TargetColumnName) + objectMap["taskType"] = TaskTypeImageClassification + populate(objectMap, "trainingData", i.TrainingData) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageClassification. 
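// Aside (illustration only, not part of the generated file): the Image type above round-trips
// unknown JSON keys through its AdditionalProperties map — MarshalJSON copies the map entries
// back to the top level, and the default case in UnmarshalJSON collects anything it does not
// recognise. A small self-contained stand-in; bag and the sample key names are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

type bag struct {
	Type                 string
	AdditionalProperties map[string]any
}

// MarshalJSON flattens the extras back to the top level alongside the known field.
func (b bag) MarshalJSON() ([]byte, error) {
	objectMap := map[string]any{"type": b.Type}
	for k, v := range b.AdditionalProperties {
		objectMap[k] = v
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON routes unrecognised keys into AdditionalProperties.
func (b *bag) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return err
	}
	for key, val := range rawMsg {
		switch key {
		case "type":
			if err := json.Unmarshal(val, &b.Type); err != nil {
				return err
			}
		default:
			if b.AdditionalProperties == nil {
				b.AdditionalProperties = map[string]any{}
			}
			var aux any
			if err := json.Unmarshal(val, &aux); err != nil {
				return err
			}
			b.AdditionalProperties[key] = aux
		}
	}
	return nil
}

func main() {
	var b bag
	_ = json.Unmarshal([]byte(`{"type":"example","extra":"value"}`), &b)
	fmt.Println(b.Type, b.AdditionalProperties["extra"]) // example value
}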
+func (i *ImageClassification) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "logVerbosity": + err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) + delete(rawMsg, key) + case "modelSettings": + err = unpopulate(val, "ModelSettings", &i.ModelSettings) + delete(rawMsg, key) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &i.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "targetColumnName": + err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) + delete(rawMsg, key) + case "taskType": + err = unpopulate(val, "TaskType", &i.TaskType) + delete(rawMsg, key) + case "trainingData": + err = unpopulate(val, "TrainingData", &i.TrainingData) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageClassificationBase. +func (i ImageClassificationBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "modelSettings", i.ModelSettings) + populate(objectMap, "searchSpace", i.SearchSpace) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageClassificationBase. +func (i *ImageClassificationBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "modelSettings": + err = unpopulate(val, "ModelSettings", &i.ModelSettings) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &i.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageClassificationMultilabel. 
+func (i ImageClassificationMultilabel) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "logVerbosity", i.LogVerbosity) + populate(objectMap, "modelSettings", i.ModelSettings) + populate(objectMap, "primaryMetric", i.PrimaryMetric) + populate(objectMap, "searchSpace", i.SearchSpace) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "targetColumnName", i.TargetColumnName) + objectMap["taskType"] = TaskTypeImageClassificationMultilabel + populate(objectMap, "trainingData", i.TrainingData) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageClassificationMultilabel. +func (i *ImageClassificationMultilabel) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "logVerbosity": + err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) + delete(rawMsg, key) + case "modelSettings": + err = unpopulate(val, "ModelSettings", &i.ModelSettings) + delete(rawMsg, key) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &i.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "targetColumnName": + err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) + delete(rawMsg, key) + case "taskType": + err = unpopulate(val, "TaskType", &i.TaskType) + delete(rawMsg, key) + case "trainingData": + err = unpopulate(val, "TrainingData", &i.TrainingData) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageInstanceSegmentation. +func (i ImageInstanceSegmentation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "logVerbosity", i.LogVerbosity) + populate(objectMap, "modelSettings", i.ModelSettings) + populate(objectMap, "primaryMetric", i.PrimaryMetric) + populate(objectMap, "searchSpace", i.SearchSpace) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "targetColumnName", i.TargetColumnName) + objectMap["taskType"] = TaskTypeImageInstanceSegmentation + populate(objectMap, "trainingData", i.TrainingData) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageInstanceSegmentation. 
+func (i *ImageInstanceSegmentation) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "logVerbosity": + err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) + delete(rawMsg, key) + case "modelSettings": + err = unpopulate(val, "ModelSettings", &i.ModelSettings) + delete(rawMsg, key) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &i.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "targetColumnName": + err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) + delete(rawMsg, key) + case "taskType": + err = unpopulate(val, "TaskType", &i.TaskType) + delete(rawMsg, key) + case "trainingData": + err = unpopulate(val, "TrainingData", &i.TrainingData) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageLimitSettings. +func (i ImageLimitSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "maxConcurrentTrials", i.MaxConcurrentTrials) + populate(objectMap, "maxTrials", i.MaxTrials) + populate(objectMap, "timeout", i.Timeout) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageLimitSettings. +func (i *ImageLimitSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "maxConcurrentTrials": + err = unpopulate(val, "MaxConcurrentTrials", &i.MaxConcurrentTrials) + delete(rawMsg, key) + case "maxTrials": + err = unpopulate(val, "MaxTrials", &i.MaxTrials) + delete(rawMsg, key) + case "timeout": + err = unpopulate(val, "Timeout", &i.Timeout) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageMetadata. +func (i ImageMetadata) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "currentImageVersion", i.CurrentImageVersion) + populate(objectMap, "isLatestOsImageVersion", i.IsLatestOsImageVersion) + populate(objectMap, "latestImageVersion", i.LatestImageVersion) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageMetadata. 
+func (i *ImageMetadata) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "currentImageVersion": + err = unpopulate(val, "CurrentImageVersion", &i.CurrentImageVersion) + delete(rawMsg, key) + case "isLatestOsImageVersion": + err = unpopulate(val, "IsLatestOsImageVersion", &i.IsLatestOsImageVersion) + delete(rawMsg, key) + case "latestImageVersion": + err = unpopulate(val, "LatestImageVersion", &i.LatestImageVersion) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageModelDistributionSettings. +func (i ImageModelDistributionSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "amsGradient", i.AmsGradient) + populate(objectMap, "augmentations", i.Augmentations) + populate(objectMap, "beta1", i.Beta1) + populate(objectMap, "beta2", i.Beta2) + populate(objectMap, "distributed", i.Distributed) + populate(objectMap, "earlyStopping", i.EarlyStopping) + populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) + populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) + populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) + populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) + populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) + populate(objectMap, "layersToFreeze", i.LayersToFreeze) + populate(objectMap, "learningRate", i.LearningRate) + populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) + populate(objectMap, "modelName", i.ModelName) + populate(objectMap, "momentum", i.Momentum) + populate(objectMap, "nesterov", i.Nesterov) + populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) + populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) + populate(objectMap, "optimizer", i.Optimizer) + populate(objectMap, "randomSeed", i.RandomSeed) + populate(objectMap, "stepLRGamma", i.StepLRGamma) + populate(objectMap, "stepLRStepSize", i.StepLRStepSize) + populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) + populate(objectMap, "validationBatchSize", i.ValidationBatchSize) + populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) + populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) + populate(objectMap, "weightDecay", i.WeightDecay) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelDistributionSettings. 
+func (i *ImageModelDistributionSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "amsGradient": + err = unpopulate(val, "AmsGradient", &i.AmsGradient) + delete(rawMsg, key) + case "augmentations": + err = unpopulate(val, "Augmentations", &i.Augmentations) + delete(rawMsg, key) + case "beta1": + err = unpopulate(val, "Beta1", &i.Beta1) + delete(rawMsg, key) + case "beta2": + err = unpopulate(val, "Beta2", &i.Beta2) + delete(rawMsg, key) + case "distributed": + err = unpopulate(val, "Distributed", &i.Distributed) + delete(rawMsg, key) + case "earlyStopping": + err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + delete(rawMsg, key) + case "earlyStoppingDelay": + err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + delete(rawMsg, key) + case "earlyStoppingPatience": + err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + delete(rawMsg, key) + case "enableOnnxNormalization": + err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + delete(rawMsg, key) + case "evaluationFrequency": + err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + delete(rawMsg, key) + case "gradientAccumulationStep": + err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + delete(rawMsg, key) + case "layersToFreeze": + err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &i.LearningRate) + delete(rawMsg, key) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &i.ModelName) + delete(rawMsg, key) + case "momentum": + err = unpopulate(val, "Momentum", &i.Momentum) + delete(rawMsg, key) + case "nesterov": + err = unpopulate(val, "Nesterov", &i.Nesterov) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + delete(rawMsg, key) + case "numberOfWorkers": + err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + delete(rawMsg, key) + case "optimizer": + err = unpopulate(val, "Optimizer", &i.Optimizer) + delete(rawMsg, key) + case "randomSeed": + err = unpopulate(val, "RandomSeed", &i.RandomSeed) + delete(rawMsg, key) + case "stepLRGamma": + err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + delete(rawMsg, key) + case "stepLRStepSize": + err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + delete(rawMsg, key) + case "warmupCosineLRCycles": + err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + delete(rawMsg, key) + case "warmupCosineLRWarmupEpochs": + err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &i.WeightDecay) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type 
ImageModelDistributionSettingsClassification. +func (i ImageModelDistributionSettingsClassification) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "amsGradient", i.AmsGradient) + populate(objectMap, "augmentations", i.Augmentations) + populate(objectMap, "beta1", i.Beta1) + populate(objectMap, "beta2", i.Beta2) + populate(objectMap, "distributed", i.Distributed) + populate(objectMap, "earlyStopping", i.EarlyStopping) + populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) + populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) + populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) + populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) + populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) + populate(objectMap, "layersToFreeze", i.LayersToFreeze) + populate(objectMap, "learningRate", i.LearningRate) + populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) + populate(objectMap, "modelName", i.ModelName) + populate(objectMap, "momentum", i.Momentum) + populate(objectMap, "nesterov", i.Nesterov) + populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) + populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) + populate(objectMap, "optimizer", i.Optimizer) + populate(objectMap, "randomSeed", i.RandomSeed) + populate(objectMap, "stepLRGamma", i.StepLRGamma) + populate(objectMap, "stepLRStepSize", i.StepLRStepSize) + populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) + populate(objectMap, "trainingCropSize", i.TrainingCropSize) + populate(objectMap, "validationBatchSize", i.ValidationBatchSize) + populate(objectMap, "validationCropSize", i.ValidationCropSize) + populate(objectMap, "validationResizeSize", i.ValidationResizeSize) + populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) + populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) + populate(objectMap, "weightDecay", i.WeightDecay) + populate(objectMap, "weightedLoss", i.WeightedLoss) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelDistributionSettingsClassification. 
+func (i *ImageModelDistributionSettingsClassification) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "amsGradient": + err = unpopulate(val, "AmsGradient", &i.AmsGradient) + delete(rawMsg, key) + case "augmentations": + err = unpopulate(val, "Augmentations", &i.Augmentations) + delete(rawMsg, key) + case "beta1": + err = unpopulate(val, "Beta1", &i.Beta1) + delete(rawMsg, key) + case "beta2": + err = unpopulate(val, "Beta2", &i.Beta2) + delete(rawMsg, key) + case "distributed": + err = unpopulate(val, "Distributed", &i.Distributed) + delete(rawMsg, key) + case "earlyStopping": + err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + delete(rawMsg, key) + case "earlyStoppingDelay": + err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + delete(rawMsg, key) + case "earlyStoppingPatience": + err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + delete(rawMsg, key) + case "enableOnnxNormalization": + err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + delete(rawMsg, key) + case "evaluationFrequency": + err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + delete(rawMsg, key) + case "gradientAccumulationStep": + err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + delete(rawMsg, key) + case "layersToFreeze": + err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &i.LearningRate) + delete(rawMsg, key) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &i.ModelName) + delete(rawMsg, key) + case "momentum": + err = unpopulate(val, "Momentum", &i.Momentum) + delete(rawMsg, key) + case "nesterov": + err = unpopulate(val, "Nesterov", &i.Nesterov) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + delete(rawMsg, key) + case "numberOfWorkers": + err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + delete(rawMsg, key) + case "optimizer": + err = unpopulate(val, "Optimizer", &i.Optimizer) + delete(rawMsg, key) + case "randomSeed": + err = unpopulate(val, "RandomSeed", &i.RandomSeed) + delete(rawMsg, key) + case "stepLRGamma": + err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + delete(rawMsg, key) + case "stepLRStepSize": + err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + delete(rawMsg, key) + case "trainingCropSize": + err = unpopulate(val, "TrainingCropSize", &i.TrainingCropSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + delete(rawMsg, key) + case "validationCropSize": + err = unpopulate(val, "ValidationCropSize", &i.ValidationCropSize) + delete(rawMsg, key) + case "validationResizeSize": + err = unpopulate(val, "ValidationResizeSize", &i.ValidationResizeSize) + delete(rawMsg, key) + case "warmupCosineLRCycles": + err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + delete(rawMsg, key) + case "warmupCosineLRWarmupEpochs": + err = unpopulate(val, 
"WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &i.WeightDecay) + delete(rawMsg, key) + case "weightedLoss": + err = unpopulate(val, "WeightedLoss", &i.WeightedLoss) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageModelDistributionSettingsObjectDetection. +func (i ImageModelDistributionSettingsObjectDetection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "amsGradient", i.AmsGradient) + populate(objectMap, "augmentations", i.Augmentations) + populate(objectMap, "beta1", i.Beta1) + populate(objectMap, "beta2", i.Beta2) + populate(objectMap, "boxDetectionsPerImage", i.BoxDetectionsPerImage) + populate(objectMap, "boxScoreThreshold", i.BoxScoreThreshold) + populate(objectMap, "distributed", i.Distributed) + populate(objectMap, "earlyStopping", i.EarlyStopping) + populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) + populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) + populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) + populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) + populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) + populate(objectMap, "imageSize", i.ImageSize) + populate(objectMap, "layersToFreeze", i.LayersToFreeze) + populate(objectMap, "learningRate", i.LearningRate) + populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) + populate(objectMap, "maxSize", i.MaxSize) + populate(objectMap, "minSize", i.MinSize) + populate(objectMap, "modelName", i.ModelName) + populate(objectMap, "modelSize", i.ModelSize) + populate(objectMap, "momentum", i.Momentum) + populate(objectMap, "multiScale", i.MultiScale) + populate(objectMap, "nesterov", i.Nesterov) + populate(objectMap, "nmsIouThreshold", i.NmsIouThreshold) + populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) + populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) + populate(objectMap, "optimizer", i.Optimizer) + populate(objectMap, "randomSeed", i.RandomSeed) + populate(objectMap, "stepLRGamma", i.StepLRGamma) + populate(objectMap, "stepLRStepSize", i.StepLRStepSize) + populate(objectMap, "tileGridSize", i.TileGridSize) + populate(objectMap, "tileOverlapRatio", i.TileOverlapRatio) + populate(objectMap, "tilePredictionsNmsThreshold", i.TilePredictionsNmsThreshold) + populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) + populate(objectMap, "validationBatchSize", i.ValidationBatchSize) + populate(objectMap, "validationIouThreshold", i.ValidationIouThreshold) + populate(objectMap, "validationMetricType", i.ValidationMetricType) + populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) + populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) + populate(objectMap, "weightDecay", i.WeightDecay) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelDistributionSettingsObjectDetection. 
+func (i *ImageModelDistributionSettingsObjectDetection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "amsGradient": + err = unpopulate(val, "AmsGradient", &i.AmsGradient) + delete(rawMsg, key) + case "augmentations": + err = unpopulate(val, "Augmentations", &i.Augmentations) + delete(rawMsg, key) + case "beta1": + err = unpopulate(val, "Beta1", &i.Beta1) + delete(rawMsg, key) + case "beta2": + err = unpopulate(val, "Beta2", &i.Beta2) + delete(rawMsg, key) + case "boxDetectionsPerImage": + err = unpopulate(val, "BoxDetectionsPerImage", &i.BoxDetectionsPerImage) + delete(rawMsg, key) + case "boxScoreThreshold": + err = unpopulate(val, "BoxScoreThreshold", &i.BoxScoreThreshold) + delete(rawMsg, key) + case "distributed": + err = unpopulate(val, "Distributed", &i.Distributed) + delete(rawMsg, key) + case "earlyStopping": + err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + delete(rawMsg, key) + case "earlyStoppingDelay": + err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + delete(rawMsg, key) + case "earlyStoppingPatience": + err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + delete(rawMsg, key) + case "enableOnnxNormalization": + err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + delete(rawMsg, key) + case "evaluationFrequency": + err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + delete(rawMsg, key) + case "gradientAccumulationStep": + err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + delete(rawMsg, key) + case "imageSize": + err = unpopulate(val, "ImageSize", &i.ImageSize) + delete(rawMsg, key) + case "layersToFreeze": + err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &i.LearningRate) + delete(rawMsg, key) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + delete(rawMsg, key) + case "maxSize": + err = unpopulate(val, "MaxSize", &i.MaxSize) + delete(rawMsg, key) + case "minSize": + err = unpopulate(val, "MinSize", &i.MinSize) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &i.ModelName) + delete(rawMsg, key) + case "modelSize": + err = unpopulate(val, "ModelSize", &i.ModelSize) + delete(rawMsg, key) + case "momentum": + err = unpopulate(val, "Momentum", &i.Momentum) + delete(rawMsg, key) + case "multiScale": + err = unpopulate(val, "MultiScale", &i.MultiScale) + delete(rawMsg, key) + case "nesterov": + err = unpopulate(val, "Nesterov", &i.Nesterov) + delete(rawMsg, key) + case "nmsIouThreshold": + err = unpopulate(val, "NmsIouThreshold", &i.NmsIouThreshold) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + delete(rawMsg, key) + case "numberOfWorkers": + err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + delete(rawMsg, key) + case "optimizer": + err = unpopulate(val, "Optimizer", &i.Optimizer) + delete(rawMsg, key) + case "randomSeed": + err = unpopulate(val, "RandomSeed", &i.RandomSeed) + delete(rawMsg, key) + case "stepLRGamma": + err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + delete(rawMsg, key) + case "stepLRStepSize": + err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + 
delete(rawMsg, key) + case "tileGridSize": + err = unpopulate(val, "TileGridSize", &i.TileGridSize) + delete(rawMsg, key) + case "tileOverlapRatio": + err = unpopulate(val, "TileOverlapRatio", &i.TileOverlapRatio) + delete(rawMsg, key) + case "tilePredictionsNmsThreshold": + err = unpopulate(val, "TilePredictionsNmsThreshold", &i.TilePredictionsNmsThreshold) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + delete(rawMsg, key) + case "validationIouThreshold": + err = unpopulate(val, "ValidationIouThreshold", &i.ValidationIouThreshold) + delete(rawMsg, key) + case "validationMetricType": + err = unpopulate(val, "ValidationMetricType", &i.ValidationMetricType) + delete(rawMsg, key) + case "warmupCosineLRCycles": + err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + delete(rawMsg, key) + case "warmupCosineLRWarmupEpochs": + err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &i.WeightDecay) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageModelSettings. +func (i ImageModelSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "advancedSettings", i.AdvancedSettings) + populate(objectMap, "amsGradient", i.AmsGradient) + populate(objectMap, "augmentations", i.Augmentations) + populate(objectMap, "beta1", i.Beta1) + populate(objectMap, "beta2", i.Beta2) + populate(objectMap, "checkpointFrequency", i.CheckpointFrequency) + populate(objectMap, "checkpointModel", i.CheckpointModel) + populate(objectMap, "checkpointRunId", i.CheckpointRunID) + populate(objectMap, "distributed", i.Distributed) + populate(objectMap, "earlyStopping", i.EarlyStopping) + populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) + populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) + populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) + populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) + populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) + populate(objectMap, "layersToFreeze", i.LayersToFreeze) + populate(objectMap, "learningRate", i.LearningRate) + populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) + populate(objectMap, "modelName", i.ModelName) + populate(objectMap, "momentum", i.Momentum) + populate(objectMap, "nesterov", i.Nesterov) + populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) + populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) + populate(objectMap, "optimizer", i.Optimizer) + populate(objectMap, "randomSeed", i.RandomSeed) + populate(objectMap, "stepLRGamma", i.StepLRGamma) + populate(objectMap, "stepLRStepSize", i.StepLRStepSize) + populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) + populate(objectMap, "validationBatchSize", i.ValidationBatchSize) + populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) + populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) + populate(objectMap, "weightDecay", i.WeightDecay) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type 
ImageModelSettings. +func (i *ImageModelSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "advancedSettings": + err = unpopulate(val, "AdvancedSettings", &i.AdvancedSettings) + delete(rawMsg, key) + case "amsGradient": + err = unpopulate(val, "AmsGradient", &i.AmsGradient) + delete(rawMsg, key) + case "augmentations": + err = unpopulate(val, "Augmentations", &i.Augmentations) + delete(rawMsg, key) + case "beta1": + err = unpopulate(val, "Beta1", &i.Beta1) + delete(rawMsg, key) + case "beta2": + err = unpopulate(val, "Beta2", &i.Beta2) + delete(rawMsg, key) + case "checkpointFrequency": + err = unpopulate(val, "CheckpointFrequency", &i.CheckpointFrequency) + delete(rawMsg, key) + case "checkpointModel": + err = unpopulate(val, "CheckpointModel", &i.CheckpointModel) + delete(rawMsg, key) + case "checkpointRunId": + err = unpopulate(val, "CheckpointRunID", &i.CheckpointRunID) + delete(rawMsg, key) + case "distributed": + err = unpopulate(val, "Distributed", &i.Distributed) + delete(rawMsg, key) + case "earlyStopping": + err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + delete(rawMsg, key) + case "earlyStoppingDelay": + err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + delete(rawMsg, key) + case "earlyStoppingPatience": + err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + delete(rawMsg, key) + case "enableOnnxNormalization": + err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + delete(rawMsg, key) + case "evaluationFrequency": + err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + delete(rawMsg, key) + case "gradientAccumulationStep": + err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + delete(rawMsg, key) + case "layersToFreeze": + err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &i.LearningRate) + delete(rawMsg, key) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &i.ModelName) + delete(rawMsg, key) + case "momentum": + err = unpopulate(val, "Momentum", &i.Momentum) + delete(rawMsg, key) + case "nesterov": + err = unpopulate(val, "Nesterov", &i.Nesterov) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + delete(rawMsg, key) + case "numberOfWorkers": + err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + delete(rawMsg, key) + case "optimizer": + err = unpopulate(val, "Optimizer", &i.Optimizer) + delete(rawMsg, key) + case "randomSeed": + err = unpopulate(val, "RandomSeed", &i.RandomSeed) + delete(rawMsg, key) + case "stepLRGamma": + err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + delete(rawMsg, key) + case "stepLRStepSize": + err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + delete(rawMsg, key) + case "warmupCosineLRCycles": + err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + 
delete(rawMsg, key) + case "warmupCosineLRWarmupEpochs": + err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &i.WeightDecay) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageModelSettingsClassification. +func (i ImageModelSettingsClassification) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "advancedSettings", i.AdvancedSettings) + populate(objectMap, "amsGradient", i.AmsGradient) + populate(objectMap, "augmentations", i.Augmentations) + populate(objectMap, "beta1", i.Beta1) + populate(objectMap, "beta2", i.Beta2) + populate(objectMap, "checkpointFrequency", i.CheckpointFrequency) + populate(objectMap, "checkpointModel", i.CheckpointModel) + populate(objectMap, "checkpointRunId", i.CheckpointRunID) + populate(objectMap, "distributed", i.Distributed) + populate(objectMap, "earlyStopping", i.EarlyStopping) + populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) + populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) + populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) + populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) + populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) + populate(objectMap, "layersToFreeze", i.LayersToFreeze) + populate(objectMap, "learningRate", i.LearningRate) + populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) + populate(objectMap, "modelName", i.ModelName) + populate(objectMap, "momentum", i.Momentum) + populate(objectMap, "nesterov", i.Nesterov) + populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) + populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) + populate(objectMap, "optimizer", i.Optimizer) + populate(objectMap, "randomSeed", i.RandomSeed) + populate(objectMap, "stepLRGamma", i.StepLRGamma) + populate(objectMap, "stepLRStepSize", i.StepLRStepSize) + populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) + populate(objectMap, "trainingCropSize", i.TrainingCropSize) + populate(objectMap, "validationBatchSize", i.ValidationBatchSize) + populate(objectMap, "validationCropSize", i.ValidationCropSize) + populate(objectMap, "validationResizeSize", i.ValidationResizeSize) + populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) + populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) + populate(objectMap, "weightDecay", i.WeightDecay) + populate(objectMap, "weightedLoss", i.WeightedLoss) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelSettingsClassification. 
+func (i *ImageModelSettingsClassification) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "advancedSettings": + err = unpopulate(val, "AdvancedSettings", &i.AdvancedSettings) + delete(rawMsg, key) + case "amsGradient": + err = unpopulate(val, "AmsGradient", &i.AmsGradient) + delete(rawMsg, key) + case "augmentations": + err = unpopulate(val, "Augmentations", &i.Augmentations) + delete(rawMsg, key) + case "beta1": + err = unpopulate(val, "Beta1", &i.Beta1) + delete(rawMsg, key) + case "beta2": + err = unpopulate(val, "Beta2", &i.Beta2) + delete(rawMsg, key) + case "checkpointFrequency": + err = unpopulate(val, "CheckpointFrequency", &i.CheckpointFrequency) + delete(rawMsg, key) + case "checkpointModel": + err = unpopulate(val, "CheckpointModel", &i.CheckpointModel) + delete(rawMsg, key) + case "checkpointRunId": + err = unpopulate(val, "CheckpointRunID", &i.CheckpointRunID) + delete(rawMsg, key) + case "distributed": + err = unpopulate(val, "Distributed", &i.Distributed) + delete(rawMsg, key) + case "earlyStopping": + err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + delete(rawMsg, key) + case "earlyStoppingDelay": + err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + delete(rawMsg, key) + case "earlyStoppingPatience": + err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + delete(rawMsg, key) + case "enableOnnxNormalization": + err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + delete(rawMsg, key) + case "evaluationFrequency": + err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + delete(rawMsg, key) + case "gradientAccumulationStep": + err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + delete(rawMsg, key) + case "layersToFreeze": + err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &i.LearningRate) + delete(rawMsg, key) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &i.ModelName) + delete(rawMsg, key) + case "momentum": + err = unpopulate(val, "Momentum", &i.Momentum) + delete(rawMsg, key) + case "nesterov": + err = unpopulate(val, "Nesterov", &i.Nesterov) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + delete(rawMsg, key) + case "numberOfWorkers": + err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + delete(rawMsg, key) + case "optimizer": + err = unpopulate(val, "Optimizer", &i.Optimizer) + delete(rawMsg, key) + case "randomSeed": + err = unpopulate(val, "RandomSeed", &i.RandomSeed) + delete(rawMsg, key) + case "stepLRGamma": + err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + delete(rawMsg, key) + case "stepLRStepSize": + err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + delete(rawMsg, key) + case "trainingCropSize": + err = unpopulate(val, "TrainingCropSize", &i.TrainingCropSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + delete(rawMsg, key) 
+ case "validationCropSize": + err = unpopulate(val, "ValidationCropSize", &i.ValidationCropSize) + delete(rawMsg, key) + case "validationResizeSize": + err = unpopulate(val, "ValidationResizeSize", &i.ValidationResizeSize) + delete(rawMsg, key) + case "warmupCosineLRCycles": + err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + delete(rawMsg, key) + case "warmupCosineLRWarmupEpochs": + err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &i.WeightDecay) + delete(rawMsg, key) + case "weightedLoss": + err = unpopulate(val, "WeightedLoss", &i.WeightedLoss) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageModelSettingsObjectDetection. +func (i ImageModelSettingsObjectDetection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "advancedSettings", i.AdvancedSettings) + populate(objectMap, "amsGradient", i.AmsGradient) + populate(objectMap, "augmentations", i.Augmentations) + populate(objectMap, "beta1", i.Beta1) + populate(objectMap, "beta2", i.Beta2) + populate(objectMap, "boxDetectionsPerImage", i.BoxDetectionsPerImage) + populate(objectMap, "boxScoreThreshold", i.BoxScoreThreshold) + populate(objectMap, "checkpointFrequency", i.CheckpointFrequency) + populate(objectMap, "checkpointModel", i.CheckpointModel) + populate(objectMap, "checkpointRunId", i.CheckpointRunID) + populate(objectMap, "distributed", i.Distributed) + populate(objectMap, "earlyStopping", i.EarlyStopping) + populate(objectMap, "earlyStoppingDelay", i.EarlyStoppingDelay) + populate(objectMap, "earlyStoppingPatience", i.EarlyStoppingPatience) + populate(objectMap, "enableOnnxNormalization", i.EnableOnnxNormalization) + populate(objectMap, "evaluationFrequency", i.EvaluationFrequency) + populate(objectMap, "gradientAccumulationStep", i.GradientAccumulationStep) + populate(objectMap, "imageSize", i.ImageSize) + populate(objectMap, "layersToFreeze", i.LayersToFreeze) + populate(objectMap, "learningRate", i.LearningRate) + populate(objectMap, "learningRateScheduler", i.LearningRateScheduler) + populate(objectMap, "logTrainingMetrics", i.LogTrainingMetrics) + populate(objectMap, "logValidationLoss", i.LogValidationLoss) + populate(objectMap, "maxSize", i.MaxSize) + populate(objectMap, "minSize", i.MinSize) + populate(objectMap, "modelName", i.ModelName) + populate(objectMap, "modelSize", i.ModelSize) + populate(objectMap, "momentum", i.Momentum) + populate(objectMap, "multiScale", i.MultiScale) + populate(objectMap, "nesterov", i.Nesterov) + populate(objectMap, "nmsIouThreshold", i.NmsIouThreshold) + populate(objectMap, "numberOfEpochs", i.NumberOfEpochs) + populate(objectMap, "numberOfWorkers", i.NumberOfWorkers) + populate(objectMap, "optimizer", i.Optimizer) + populate(objectMap, "randomSeed", i.RandomSeed) + populate(objectMap, "stepLRGamma", i.StepLRGamma) + populate(objectMap, "stepLRStepSize", i.StepLRStepSize) + populate(objectMap, "tileGridSize", i.TileGridSize) + populate(objectMap, "tileOverlapRatio", i.TileOverlapRatio) + populate(objectMap, "tilePredictionsNmsThreshold", i.TilePredictionsNmsThreshold) + populate(objectMap, "trainingBatchSize", i.TrainingBatchSize) + populate(objectMap, "validationBatchSize", i.ValidationBatchSize) + populate(objectMap, "validationIouThreshold", 
i.ValidationIouThreshold) + populate(objectMap, "validationMetricType", i.ValidationMetricType) + populate(objectMap, "warmupCosineLRCycles", i.WarmupCosineLRCycles) + populate(objectMap, "warmupCosineLRWarmupEpochs", i.WarmupCosineLRWarmupEpochs) + populate(objectMap, "weightDecay", i.WeightDecay) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageModelSettingsObjectDetection. +func (i *ImageModelSettingsObjectDetection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "advancedSettings": + err = unpopulate(val, "AdvancedSettings", &i.AdvancedSettings) + delete(rawMsg, key) + case "amsGradient": + err = unpopulate(val, "AmsGradient", &i.AmsGradient) + delete(rawMsg, key) + case "augmentations": + err = unpopulate(val, "Augmentations", &i.Augmentations) + delete(rawMsg, key) + case "beta1": + err = unpopulate(val, "Beta1", &i.Beta1) + delete(rawMsg, key) + case "beta2": + err = unpopulate(val, "Beta2", &i.Beta2) + delete(rawMsg, key) + case "boxDetectionsPerImage": + err = unpopulate(val, "BoxDetectionsPerImage", &i.BoxDetectionsPerImage) + delete(rawMsg, key) + case "boxScoreThreshold": + err = unpopulate(val, "BoxScoreThreshold", &i.BoxScoreThreshold) + delete(rawMsg, key) + case "checkpointFrequency": + err = unpopulate(val, "CheckpointFrequency", &i.CheckpointFrequency) + delete(rawMsg, key) + case "checkpointModel": + err = unpopulate(val, "CheckpointModel", &i.CheckpointModel) + delete(rawMsg, key) + case "checkpointRunId": + err = unpopulate(val, "CheckpointRunID", &i.CheckpointRunID) + delete(rawMsg, key) + case "distributed": + err = unpopulate(val, "Distributed", &i.Distributed) + delete(rawMsg, key) + case "earlyStopping": + err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + delete(rawMsg, key) + case "earlyStoppingDelay": + err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + delete(rawMsg, key) + case "earlyStoppingPatience": + err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + delete(rawMsg, key) + case "enableOnnxNormalization": + err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + delete(rawMsg, key) + case "evaluationFrequency": + err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + delete(rawMsg, key) + case "gradientAccumulationStep": + err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + delete(rawMsg, key) + case "imageSize": + err = unpopulate(val, "ImageSize", &i.ImageSize) + delete(rawMsg, key) + case "layersToFreeze": + err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &i.LearningRate) + delete(rawMsg, key) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + delete(rawMsg, key) + case "logTrainingMetrics": + err = unpopulate(val, "LogTrainingMetrics", &i.LogTrainingMetrics) + delete(rawMsg, key) + case "logValidationLoss": + err = unpopulate(val, "LogValidationLoss", &i.LogValidationLoss) + delete(rawMsg, key) + case "maxSize": + err = unpopulate(val, "MaxSize", &i.MaxSize) + delete(rawMsg, key) + case "minSize": + err = unpopulate(val, "MinSize", &i.MinSize) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, 
"ModelName", &i.ModelName) + delete(rawMsg, key) + case "modelSize": + err = unpopulate(val, "ModelSize", &i.ModelSize) + delete(rawMsg, key) + case "momentum": + err = unpopulate(val, "Momentum", &i.Momentum) + delete(rawMsg, key) + case "multiScale": + err = unpopulate(val, "MultiScale", &i.MultiScale) + delete(rawMsg, key) + case "nesterov": + err = unpopulate(val, "Nesterov", &i.Nesterov) + delete(rawMsg, key) + case "nmsIouThreshold": + err = unpopulate(val, "NmsIouThreshold", &i.NmsIouThreshold) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + delete(rawMsg, key) + case "numberOfWorkers": + err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + delete(rawMsg, key) + case "optimizer": + err = unpopulate(val, "Optimizer", &i.Optimizer) + delete(rawMsg, key) + case "randomSeed": + err = unpopulate(val, "RandomSeed", &i.RandomSeed) + delete(rawMsg, key) + case "stepLRGamma": + err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + delete(rawMsg, key) + case "stepLRStepSize": + err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + delete(rawMsg, key) + case "tileGridSize": + err = unpopulate(val, "TileGridSize", &i.TileGridSize) + delete(rawMsg, key) + case "tileOverlapRatio": + err = unpopulate(val, "TileOverlapRatio", &i.TileOverlapRatio) + delete(rawMsg, key) + case "tilePredictionsNmsThreshold": + err = unpopulate(val, "TilePredictionsNmsThreshold", &i.TilePredictionsNmsThreshold) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + delete(rawMsg, key) + case "validationIouThreshold": + err = unpopulate(val, "ValidationIouThreshold", &i.ValidationIouThreshold) + delete(rawMsg, key) + case "validationMetricType": + err = unpopulate(val, "ValidationMetricType", &i.ValidationMetricType) + delete(rawMsg, key) + case "warmupCosineLRCycles": + err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + delete(rawMsg, key) + case "warmupCosineLRWarmupEpochs": + err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &i.WeightDecay) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageObjectDetection. +func (i ImageObjectDetection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "logVerbosity", i.LogVerbosity) + populate(objectMap, "modelSettings", i.ModelSettings) + populate(objectMap, "primaryMetric", i.PrimaryMetric) + populate(objectMap, "searchSpace", i.SearchSpace) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "targetColumnName", i.TargetColumnName) + objectMap["taskType"] = TaskTypeImageObjectDetection + populate(objectMap, "trainingData", i.TrainingData) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageObjectDetection. 
+func (i *ImageObjectDetection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "logVerbosity": + err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) + delete(rawMsg, key) + case "modelSettings": + err = unpopulate(val, "ModelSettings", &i.ModelSettings) + delete(rawMsg, key) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &i.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "targetColumnName": + err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) + delete(rawMsg, key) + case "taskType": + err = unpopulate(val, "TaskType", &i.TaskType) + delete(rawMsg, key) + case "trainingData": + err = unpopulate(val, "TrainingData", &i.TrainingData) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageObjectDetectionBase. +func (i ImageObjectDetectionBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "modelSettings", i.ModelSettings) + populate(objectMap, "searchSpace", i.SearchSpace) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageObjectDetectionBase. +func (i *ImageObjectDetectionBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "modelSettings": + err = unpopulate(val, "ModelSettings", &i.ModelSettings) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &i.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageSweepSettings. 
+func (i ImageSweepSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "earlyTermination", i.EarlyTermination) + populate(objectMap, "samplingAlgorithm", i.SamplingAlgorithm) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageSweepSettings. +func (i *ImageSweepSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "earlyTermination": + i.EarlyTermination, err = unmarshalEarlyTerminationPolicyClassification(val) + delete(rawMsg, key) + case "samplingAlgorithm": + err = unpopulate(val, "SamplingAlgorithm", &i.SamplingAlgorithm) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageVertical. +func (i ImageVertical) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limitSettings", i.LimitSettings) + populate(objectMap, "sweepSettings", i.SweepSettings) + populate(objectMap, "validationData", i.ValidationData) + populate(objectMap, "validationDataSize", i.ValidationDataSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageVertical. +func (i *ImageVertical) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limitSettings": + err = unpopulate(val, "LimitSettings", &i.LimitSettings) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &i.SweepSettings) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &i.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImportDataAction. +func (i ImportDataAction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["actionType"] = ScheduleActionTypeImportData + populate(objectMap, "dataImportDefinition", i.DataImportDefinition) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImportDataAction. +func (i *ImportDataAction) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "actionType": + err = unpopulate(val, "ActionType", &i.ActionType) + delete(rawMsg, key) + case "dataImportDefinition": + err = unpopulate(val, "DataImportDefinition", &i.DataImportDefinition) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type IndexColumn. 
+func (i IndexColumn) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "columnName", i.ColumnName) + populate(objectMap, "dataType", i.DataType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IndexColumn. +func (i *IndexColumn) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "columnName": + err = unpopulate(val, "ColumnName", &i.ColumnName) + delete(rawMsg, key) + case "dataType": + err = unpopulate(val, "DataType", &i.DataType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type InferenceContainerProperties. +func (i InferenceContainerProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "livenessRoute", i.LivenessRoute) + populate(objectMap, "readinessRoute", i.ReadinessRoute) + populate(objectMap, "scoringRoute", i.ScoringRoute) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type InferenceContainerProperties. +func (i *InferenceContainerProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "livenessRoute": + err = unpopulate(val, "LivenessRoute", &i.LivenessRoute) + delete(rawMsg, key) + case "readinessRoute": + err = unpopulate(val, "ReadinessRoute", &i.ReadinessRoute) + delete(rawMsg, key) + case "scoringRoute": + err = unpopulate(val, "ScoringRoute", &i.ScoringRoute) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type InferencingServer. +func (i InferencingServer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["serverType"] = i.ServerType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type InferencingServer. +func (i *InferencingServer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "serverType": + err = unpopulate(val, "ServerType", &i.ServerType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type InstanceTypeSchema. +func (i InstanceTypeSchema) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nodeSelector", i.NodeSelector) + populate(objectMap, "resources", i.Resources) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type InstanceTypeSchema. 
+func (i *InstanceTypeSchema) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nodeSelector": + err = unpopulate(val, "NodeSelector", &i.NodeSelector) + delete(rawMsg, key) + case "resources": + err = unpopulate(val, "Resources", &i.Resources) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type InstanceTypeSchemaResources. +func (i InstanceTypeSchemaResources) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "limits", i.Limits) + populate(objectMap, "requests", i.Requests) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type InstanceTypeSchemaResources. +func (i *InstanceTypeSchemaResources) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "limits": + err = unpopulate(val, "Limits", &i.Limits) + delete(rawMsg, key) + case "requests": + err = unpopulate(val, "Requests", &i.Requests) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type IntellectualProperty. +func (i IntellectualProperty) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "protectionLevel", i.ProtectionLevel) + populate(objectMap, "publisher", i.Publisher) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IntellectualProperty. +func (i *IntellectualProperty) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "protectionLevel": + err = unpopulate(val, "ProtectionLevel", &i.ProtectionLevel) + delete(rawMsg, key) + case "publisher": + err = unpopulate(val, "Publisher", &i.Publisher) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobBase. +func (j JobBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", j.ID) + populate(objectMap, "name", j.Name) + populate(objectMap, "properties", j.Properties) + populate(objectMap, "systemData", j.SystemData) + populate(objectMap, "type", j.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobBase. 
+func (j *JobBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &j.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &j.Name) + delete(rawMsg, key) + case "properties": + j.Properties, err = unmarshalJobBasePropertiesClassification(val) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &j.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &j.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobBaseProperties. +func (j JobBaseProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "componentId", j.ComponentID) + populate(objectMap, "computeId", j.ComputeID) + populate(objectMap, "description", j.Description) + populate(objectMap, "displayName", j.DisplayName) + populate(objectMap, "experimentName", j.ExperimentName) + populate(objectMap, "identity", j.Identity) + populate(objectMap, "isArchived", j.IsArchived) + objectMap["jobType"] = j.JobType + populate(objectMap, "notificationSetting", j.NotificationSetting) + populate(objectMap, "properties", j.Properties) + populate(objectMap, "secretsConfiguration", j.SecretsConfiguration) + populate(objectMap, "services", j.Services) + populate(objectMap, "status", j.Status) + populate(objectMap, "tags", j.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobBaseProperties. 
+func (j *JobBaseProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "componentId": + err = unpopulate(val, "ComponentID", &j.ComponentID) + delete(rawMsg, key) + case "computeId": + err = unpopulate(val, "ComputeID", &j.ComputeID) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &j.Description) + delete(rawMsg, key) + case "displayName": + err = unpopulate(val, "DisplayName", &j.DisplayName) + delete(rawMsg, key) + case "experimentName": + err = unpopulate(val, "ExperimentName", &j.ExperimentName) + delete(rawMsg, key) + case "identity": + j.Identity, err = unmarshalIdentityConfigurationClassification(val) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &j.IsArchived) + delete(rawMsg, key) + case "jobType": + err = unpopulate(val, "JobType", &j.JobType) + delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &j.NotificationSetting) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &j.Properties) + delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &j.SecretsConfiguration) + delete(rawMsg, key) + case "services": + err = unpopulate(val, "Services", &j.Services) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &j.Status) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &j.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobBaseResourceArmPaginatedResult. +func (j JobBaseResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", j.NextLink) + populate(objectMap, "value", j.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobBaseResourceArmPaginatedResult. +func (j *JobBaseResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &j.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &j.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobInput. +func (j JobInput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", j.Description) + objectMap["jobInputType"] = j.JobInputType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobInput. 
+func (j *JobInput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &j.Description) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &j.JobInputType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobLimits. +func (j JobLimits) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["jobLimitsType"] = j.JobLimitsType + populate(objectMap, "timeout", j.Timeout) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobLimits. +func (j *JobLimits) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "jobLimitsType": + err = unpopulate(val, "JobLimitsType", &j.JobLimitsType) + delete(rawMsg, key) + case "timeout": + err = unpopulate(val, "Timeout", &j.Timeout) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobOutput. +func (j JobOutput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", j.Description) + objectMap["jobOutputType"] = j.JobOutputType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobOutput. +func (j *JobOutput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &j.Description) + delete(rawMsg, key) + case "jobOutputType": + err = unpopulate(val, "JobOutputType", &j.JobOutputType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobResourceConfiguration. +func (j JobResourceConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dockerArgs", j.DockerArgs) + populate(objectMap, "instanceCount", j.InstanceCount) + populate(objectMap, "instanceType", j.InstanceType) + populate(objectMap, "locations", j.Locations) + populate(objectMap, "maxInstanceCount", j.MaxInstanceCount) + populate(objectMap, "properties", j.Properties) + populate(objectMap, "shmSize", j.ShmSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobResourceConfiguration. 
+func (j *JobResourceConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dockerArgs": + err = unpopulate(val, "DockerArgs", &j.DockerArgs) + delete(rawMsg, key) + case "instanceCount": + err = unpopulate(val, "InstanceCount", &j.InstanceCount) + delete(rawMsg, key) + case "instanceType": + err = unpopulate(val, "InstanceType", &j.InstanceType) + delete(rawMsg, key) + case "locations": + err = unpopulate(val, "Locations", &j.Locations) + delete(rawMsg, key) + case "maxInstanceCount": + err = unpopulate(val, "MaxInstanceCount", &j.MaxInstanceCount) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &j.Properties) + delete(rawMsg, key) + case "shmSize": + err = unpopulate(val, "ShmSize", &j.ShmSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobScheduleAction. +func (j JobScheduleAction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["actionType"] = ScheduleActionTypeCreateJob + populate(objectMap, "jobDefinition", j.JobDefinition) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobScheduleAction. +func (j *JobScheduleAction) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "actionType": + err = unpopulate(val, "ActionType", &j.ActionType) + delete(rawMsg, key) + case "jobDefinition": + j.JobDefinition, err = unmarshalJobBasePropertiesClassification(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JobService. +func (j JobService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "endpoint", j.Endpoint) + populate(objectMap, "errorMessage", j.ErrorMessage) + populate(objectMap, "jobServiceType", j.JobServiceType) + populate(objectMap, "nodes", j.Nodes) + populate(objectMap, "port", j.Port) + populate(objectMap, "properties", j.Properties) + populate(objectMap, "status", j.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JobService. 
+func (j *JobService) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "endpoint": + err = unpopulate(val, "Endpoint", &j.Endpoint) + delete(rawMsg, key) + case "errorMessage": + err = unpopulate(val, "ErrorMessage", &j.ErrorMessage) + delete(rawMsg, key) + case "jobServiceType": + err = unpopulate(val, "JobServiceType", &j.JobServiceType) + delete(rawMsg, key) + case "nodes": + j.Nodes, err = unmarshalNodesClassification(val) + delete(rawMsg, key) + case "port": + err = unpopulate(val, "Port", &j.Port) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &j.Properties) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &j.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KerberosCredentials. +func (k KerberosCredentials) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "kerberosKdcAddress", k.KerberosKdcAddress) + populate(objectMap, "kerberosPrincipal", k.KerberosPrincipal) + populate(objectMap, "kerberosRealm", k.KerberosRealm) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KerberosCredentials. +func (k *KerberosCredentials) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "kerberosKdcAddress": + err = unpopulate(val, "KerberosKdcAddress", &k.KerberosKdcAddress) + delete(rawMsg, key) + case "kerberosPrincipal": + err = unpopulate(val, "KerberosPrincipal", &k.KerberosPrincipal) + delete(rawMsg, key) + case "kerberosRealm": + err = unpopulate(val, "KerberosRealm", &k.KerberosRealm) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KerberosKeytabCredentials. +func (k KerberosKeytabCredentials) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["credentialsType"] = CredentialsTypeKerberosKeytab + populate(objectMap, "kerberosKdcAddress", k.KerberosKdcAddress) + populate(objectMap, "kerberosPrincipal", k.KerberosPrincipal) + populate(objectMap, "kerberosRealm", k.KerberosRealm) + populate(objectMap, "secrets", k.Secrets) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KerberosKeytabCredentials. 
+func (k *KerberosKeytabCredentials) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "credentialsType": + err = unpopulate(val, "CredentialsType", &k.CredentialsType) + delete(rawMsg, key) + case "kerberosKdcAddress": + err = unpopulate(val, "KerberosKdcAddress", &k.KerberosKdcAddress) + delete(rawMsg, key) + case "kerberosPrincipal": + err = unpopulate(val, "KerberosPrincipal", &k.KerberosPrincipal) + delete(rawMsg, key) + case "kerberosRealm": + err = unpopulate(val, "KerberosRealm", &k.KerberosRealm) + delete(rawMsg, key) + case "secrets": + err = unpopulate(val, "Secrets", &k.Secrets) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KerberosKeytabSecrets. +func (k KerberosKeytabSecrets) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "kerberosKeytab", k.KerberosKeytab) + objectMap["secretsType"] = SecretsTypeKerberosKeytab + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KerberosKeytabSecrets. +func (k *KerberosKeytabSecrets) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "kerberosKeytab": + err = unpopulate(val, "KerberosKeytab", &k.KerberosKeytab) + delete(rawMsg, key) + case "secretsType": + err = unpopulate(val, "SecretsType", &k.SecretsType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KerberosPasswordCredentials. +func (k KerberosPasswordCredentials) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["credentialsType"] = CredentialsTypeKerberosPassword + populate(objectMap, "kerberosKdcAddress", k.KerberosKdcAddress) + populate(objectMap, "kerberosPrincipal", k.KerberosPrincipal) + populate(objectMap, "kerberosRealm", k.KerberosRealm) + populate(objectMap, "secrets", k.Secrets) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KerberosPasswordCredentials. 
+func (k *KerberosPasswordCredentials) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "credentialsType": + err = unpopulate(val, "CredentialsType", &k.CredentialsType) + delete(rawMsg, key) + case "kerberosKdcAddress": + err = unpopulate(val, "KerberosKdcAddress", &k.KerberosKdcAddress) + delete(rawMsg, key) + case "kerberosPrincipal": + err = unpopulate(val, "KerberosPrincipal", &k.KerberosPrincipal) + delete(rawMsg, key) + case "kerberosRealm": + err = unpopulate(val, "KerberosRealm", &k.KerberosRealm) + delete(rawMsg, key) + case "secrets": + err = unpopulate(val, "Secrets", &k.Secrets) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KerberosPasswordSecrets. +func (k KerberosPasswordSecrets) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "kerberosPassword", k.KerberosPassword) + objectMap["secretsType"] = SecretsTypeKerberosPassword + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KerberosPasswordSecrets. +func (k *KerberosPasswordSecrets) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "kerberosPassword": + err = unpopulate(val, "KerberosPassword", &k.KerberosPassword) + delete(rawMsg, key) + case "secretsType": + err = unpopulate(val, "SecretsType", &k.SecretsType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyVaultProperties. +func (k KeyVaultProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "identityClientId", k.IdentityClientID) + populate(objectMap, "keyIdentifier", k.KeyIdentifier) + populate(objectMap, "keyVaultArmId", k.KeyVaultArmID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyVaultProperties. +func (k *KeyVaultProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "identityClientId": + err = unpopulate(val, "IdentityClientID", &k.IdentityClientID) + delete(rawMsg, key) + case "keyIdentifier": + err = unpopulate(val, "KeyIdentifier", &k.KeyIdentifier) + delete(rawMsg, key) + case "keyVaultArmId": + err = unpopulate(val, "KeyVaultArmID", &k.KeyVaultArmID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Kubernetes. 
+func (k Kubernetes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "computeLocation", k.ComputeLocation) + objectMap["computeType"] = ComputeTypeKubernetes + populateTimeRFC3339(objectMap, "createdOn", k.CreatedOn) + populate(objectMap, "description", k.Description) + populate(objectMap, "disableLocalAuth", k.DisableLocalAuth) + populate(objectMap, "isAttachedCompute", k.IsAttachedCompute) + populateTimeRFC3339(objectMap, "modifiedOn", k.ModifiedOn) + populate(objectMap, "properties", k.Properties) + populate(objectMap, "provisioningErrors", k.ProvisioningErrors) + populate(objectMap, "provisioningState", k.ProvisioningState) + populate(objectMap, "resourceId", k.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Kubernetes. +func (k *Kubernetes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeLocation": + err = unpopulate(val, "ComputeLocation", &k.ComputeLocation) + delete(rawMsg, key) + case "computeType": + err = unpopulate(val, "ComputeType", &k.ComputeType) + delete(rawMsg, key) + case "createdOn": + err = unpopulateTimeRFC3339(val, "CreatedOn", &k.CreatedOn) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &k.Description) + delete(rawMsg, key) + case "disableLocalAuth": + err = unpopulate(val, "DisableLocalAuth", &k.DisableLocalAuth) + delete(rawMsg, key) + case "isAttachedCompute": + err = unpopulate(val, "IsAttachedCompute", &k.IsAttachedCompute) + delete(rawMsg, key) + case "modifiedOn": + err = unpopulateTimeRFC3339(val, "ModifiedOn", &k.ModifiedOn) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &k.Properties) + delete(rawMsg, key) + case "provisioningErrors": + err = unpopulate(val, "ProvisioningErrors", &k.ProvisioningErrors) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &k.ProvisioningState) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &k.ResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KubernetesOnlineDeployment. 
+func (k KubernetesOnlineDeployment) MarshalJSON() ([]byte, error) {
+    objectMap := make(map[string]any)
+    populate(objectMap, "appInsightsEnabled", k.AppInsightsEnabled)
+    populate(objectMap, "codeConfiguration", k.CodeConfiguration)
+    populate(objectMap, "containerResourceRequirements", k.ContainerResourceRequirements)
+    populate(objectMap, "dataCollector", k.DataCollector)
+    populate(objectMap, "description", k.Description)
+    populate(objectMap, "egressPublicNetworkAccess", k.EgressPublicNetworkAccess)
+    objectMap["endpointComputeType"] = EndpointComputeTypeKubernetes
+    populate(objectMap, "environmentId", k.EnvironmentID)
+    populate(objectMap, "environmentVariables", k.EnvironmentVariables)
+    populate(objectMap, "instanceType", k.InstanceType)
+    populate(objectMap, "livenessProbe", k.LivenessProbe)
+    populate(objectMap, "model", k.Model)
+    populate(objectMap, "modelMountPath", k.ModelMountPath)
+    populate(objectMap, "properties", k.Properties)
+    populate(objectMap, "provisioningState", k.ProvisioningState)
+    populate(objectMap, "readinessProbe", k.ReadinessProbe)
+    populate(objectMap, "requestSettings", k.RequestSettings)
+    populate(objectMap, "scaleSettings", k.ScaleSettings)
+    return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type KubernetesOnlineDeployment.
+func (k *KubernetesOnlineDeployment) UnmarshalJSON(data []byte) error {
+    var rawMsg map[string]json.RawMessage
+    if err := json.Unmarshal(data, &rawMsg); err != nil {
+        return fmt.Errorf("unmarshalling type %T: %v", k, err)
+    }
+    for key, val := range rawMsg {
+        var err error
+        switch key {
+        case "appInsightsEnabled":
+            err = unpopulate(val, "AppInsightsEnabled", &k.AppInsightsEnabled)
+            delete(rawMsg, key)
+        case "codeConfiguration":
+            err = unpopulate(val, "CodeConfiguration", &k.CodeConfiguration)
+            delete(rawMsg, key)
+        case "containerResourceRequirements":
+            err = unpopulate(val, "ContainerResourceRequirements", &k.ContainerResourceRequirements)
+            delete(rawMsg, key)
+        case "dataCollector":
+            err = unpopulate(val, "DataCollector", &k.DataCollector)
+            delete(rawMsg, key)
+        case "description":
+            err = unpopulate(val, "Description", &k.Description)
+            delete(rawMsg, key)
+        case "egressPublicNetworkAccess":
+            err = unpopulate(val, "EgressPublicNetworkAccess", &k.EgressPublicNetworkAccess)
+            delete(rawMsg, key)
+        case "endpointComputeType":
+            err = unpopulate(val, "EndpointComputeType", &k.EndpointComputeType)
+            delete(rawMsg, key)
+        case "environmentId":
+            err = unpopulate(val, "EnvironmentID", &k.EnvironmentID)
+            delete(rawMsg, key)
+        case "environmentVariables":
+            err = unpopulate(val, "EnvironmentVariables", &k.EnvironmentVariables)
+            delete(rawMsg, key)
+        case "instanceType":
+            err = unpopulate(val, "InstanceType", &k.InstanceType)
+            delete(rawMsg, key)
+        case "livenessProbe":
+            err = unpopulate(val, "LivenessProbe", &k.LivenessProbe)
+            delete(rawMsg, key)
+        case "model":
+            err = unpopulate(val, "Model", &k.Model)
+            delete(rawMsg, key)
+        case "modelMountPath":
+            err = unpopulate(val, "ModelMountPath", &k.ModelMountPath)
+            delete(rawMsg, key)
+        case "properties":
+            err = unpopulate(val, "Properties", &k.Properties)
+            delete(rawMsg, key)
+        case "provisioningState":
+            err = unpopulate(val, "ProvisioningState", &k.ProvisioningState)
+            delete(rawMsg, key)
+        case "readinessProbe":
+            err = unpopulate(val, "ReadinessProbe", &k.ReadinessProbe)
+            delete(rawMsg, key)
+        case "requestSettings":
+            err = unpopulate(val, "RequestSettings", &k.RequestSettings)
+            delete(rawMsg, key)
+        case "scaleSettings":
+            k.ScaleSettings, err = unmarshalOnlineScaleSettingsClassification(val)
+            delete(rawMsg, key)
+        }
+        if err != nil {
+            return fmt.Errorf("unmarshalling type %T: %v", k, err)
+        }
+    }
+    return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type KubernetesProperties.
+func (k KubernetesProperties) MarshalJSON() ([]byte, error) {
+    objectMap := make(map[string]any)
+    populate(objectMap, "defaultInstanceType", k.DefaultInstanceType)
+    populate(objectMap, "extensionInstanceReleaseTrain", k.ExtensionInstanceReleaseTrain)
+    populate(objectMap, "extensionPrincipalId", k.ExtensionPrincipalID)
+    populate(objectMap, "instanceTypes", k.InstanceTypes)
+    populate(objectMap, "namespace", k.Namespace)
+    populate(objectMap, "relayConnectionString", k.RelayConnectionString)
+    populate(objectMap, "serviceBusConnectionString", k.ServiceBusConnectionString)
+    populate(objectMap, "vcName", k.VcName)
+    return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type KubernetesProperties.
+func (k *KubernetesProperties) UnmarshalJSON(data []byte) error {
+    var rawMsg map[string]json.RawMessage
+    if err := json.Unmarshal(data, &rawMsg); err != nil {
+        return fmt.Errorf("unmarshalling type %T: %v", k, err)
+    }
+    for key, val := range rawMsg {
+        var err error
+        switch key {
+        case "defaultInstanceType":
+            err = unpopulate(val, "DefaultInstanceType", &k.DefaultInstanceType)
+            delete(rawMsg, key)
+        case "extensionInstanceReleaseTrain":
+            err = unpopulate(val, "ExtensionInstanceReleaseTrain", &k.ExtensionInstanceReleaseTrain)
+            delete(rawMsg, key)
+        case "extensionPrincipalId":
+            err = unpopulate(val, "ExtensionPrincipalID", &k.ExtensionPrincipalID)
+            delete(rawMsg, key)
+        case "instanceTypes":
+            err = unpopulate(val, "InstanceTypes", &k.InstanceTypes)
+            delete(rawMsg, key)
+        case "namespace":
+            err = unpopulate(val, "Namespace", &k.Namespace)
+            delete(rawMsg, key)
+        case "relayConnectionString":
+            err = unpopulate(val, "RelayConnectionString", &k.RelayConnectionString)
+            delete(rawMsg, key)
+        case "serviceBusConnectionString":
+            err = unpopulate(val, "ServiceBusConnectionString", &k.ServiceBusConnectionString)
+            delete(rawMsg, key)
+        case "vcName":
+            err = unpopulate(val, "VcName", &k.VcName)
+            delete(rawMsg, key)
+        }
+        if err != nil {
+            return fmt.Errorf("unmarshalling type %T: %v", k, err)
+        }
+    }
+    return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type KubernetesSchema.
+func (k KubernetesSchema) MarshalJSON() ([]byte, error) {
+    objectMap := make(map[string]any)
+    populate(objectMap, "properties", k.Properties)
+    return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type KubernetesSchema.
+func (k *KubernetesSchema) UnmarshalJSON(data []byte) error {
+    var rawMsg map[string]json.RawMessage
+    if err := json.Unmarshal(data, &rawMsg); err != nil {
+        return fmt.Errorf("unmarshalling type %T: %v", k, err)
+    }
+    for key, val := range rawMsg {
+        var err error
+        switch key {
+        case "properties":
+            err = unpopulate(val, "Properties", &k.Properties)
+            delete(rawMsg, key)
+        }
+        if err != nil {
+            return fmt.Errorf("unmarshalling type %T: %v", k, err)
+        }
+    }
+    return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type LabelCategory.
+func (l LabelCategory) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "classes", l.Classes) + populate(objectMap, "displayName", l.DisplayName) + populate(objectMap, "multiSelect", l.MultiSelect) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelCategory. +func (l *LabelCategory) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "classes": + err = unpopulate(val, "Classes", &l.Classes) + delete(rawMsg, key) + case "displayName": + err = unpopulate(val, "DisplayName", &l.DisplayName) + delete(rawMsg, key) + case "multiSelect": + err = unpopulate(val, "MultiSelect", &l.MultiSelect) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelClass. +func (l LabelClass) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "displayName", l.DisplayName) + populate(objectMap, "subclasses", l.Subclasses) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelClass. +func (l *LabelClass) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "displayName": + err = unpopulate(val, "DisplayName", &l.DisplayName) + delete(rawMsg, key) + case "subclasses": + err = unpopulate(val, "Subclasses", &l.Subclasses) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingDataConfiguration. +func (l LabelingDataConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dataId", l.DataID) + populate(objectMap, "incrementalDataRefresh", l.IncrementalDataRefresh) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingDataConfiguration. +func (l *LabelingDataConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataId": + err = unpopulate(val, "DataID", &l.DataID) + delete(rawMsg, key) + case "incrementalDataRefresh": + err = unpopulate(val, "IncrementalDataRefresh", &l.IncrementalDataRefresh) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJob. 
+func (l LabelingJob) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", l.ID) + populate(objectMap, "name", l.Name) + populate(objectMap, "properties", l.Properties) + populate(objectMap, "systemData", l.SystemData) + populate(objectMap, "type", l.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJob. +func (l *LabelingJob) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &l.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &l.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &l.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &l.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &l.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJobImageProperties. +func (l LabelingJobImageProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "annotationType", l.AnnotationType) + objectMap["mediaType"] = MediaTypeImage + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobImageProperties. +func (l *LabelingJobImageProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "annotationType": + err = unpopulate(val, "AnnotationType", &l.AnnotationType) + delete(rawMsg, key) + case "mediaType": + err = unpopulate(val, "MediaType", &l.MediaType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJobInstructions. +func (l LabelingJobInstructions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "uri", l.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobInstructions. +func (l *LabelingJobInstructions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "uri": + err = unpopulate(val, "URI", &l.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJobMediaProperties. +func (l LabelingJobMediaProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mediaType"] = l.MediaType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobMediaProperties. 
+func (l *LabelingJobMediaProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mediaType": + err = unpopulate(val, "MediaType", &l.MediaType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJobProperties. +func (l LabelingJobProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "componentId", l.ComponentID) + populate(objectMap, "computeId", l.ComputeID) + populateTimeRFC3339(objectMap, "createdDateTime", l.CreatedDateTime) + populate(objectMap, "dataConfiguration", l.DataConfiguration) + populate(objectMap, "description", l.Description) + populate(objectMap, "displayName", l.DisplayName) + populate(objectMap, "experimentName", l.ExperimentName) + populate(objectMap, "identity", l.Identity) + populate(objectMap, "isArchived", l.IsArchived) + populate(objectMap, "jobInstructions", l.JobInstructions) + objectMap["jobType"] = JobTypeLabeling + populate(objectMap, "labelCategories", l.LabelCategories) + populate(objectMap, "labelingJobMediaProperties", l.LabelingJobMediaProperties) + populate(objectMap, "mlAssistConfiguration", l.MlAssistConfiguration) + populate(objectMap, "notificationSetting", l.NotificationSetting) + populate(objectMap, "progressMetrics", l.ProgressMetrics) + populate(objectMap, "projectId", l.ProjectID) + populate(objectMap, "properties", l.Properties) + populate(objectMap, "provisioningState", l.ProvisioningState) + populate(objectMap, "secretsConfiguration", l.SecretsConfiguration) + populate(objectMap, "services", l.Services) + populate(objectMap, "status", l.Status) + populate(objectMap, "statusMessages", l.StatusMessages) + populate(objectMap, "tags", l.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobProperties. 
+func (l *LabelingJobProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "componentId": + err = unpopulate(val, "ComponentID", &l.ComponentID) + delete(rawMsg, key) + case "computeId": + err = unpopulate(val, "ComputeID", &l.ComputeID) + delete(rawMsg, key) + case "createdDateTime": + err = unpopulateTimeRFC3339(val, "CreatedDateTime", &l.CreatedDateTime) + delete(rawMsg, key) + case "dataConfiguration": + err = unpopulate(val, "DataConfiguration", &l.DataConfiguration) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &l.Description) + delete(rawMsg, key) + case "displayName": + err = unpopulate(val, "DisplayName", &l.DisplayName) + delete(rawMsg, key) + case "experimentName": + err = unpopulate(val, "ExperimentName", &l.ExperimentName) + delete(rawMsg, key) + case "identity": + l.Identity, err = unmarshalIdentityConfigurationClassification(val) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &l.IsArchived) + delete(rawMsg, key) + case "jobInstructions": + err = unpopulate(val, "JobInstructions", &l.JobInstructions) + delete(rawMsg, key) + case "jobType": + err = unpopulate(val, "JobType", &l.JobType) + delete(rawMsg, key) + case "labelCategories": + err = unpopulate(val, "LabelCategories", &l.LabelCategories) + delete(rawMsg, key) + case "labelingJobMediaProperties": + l.LabelingJobMediaProperties, err = unmarshalLabelingJobMediaPropertiesClassification(val) + delete(rawMsg, key) + case "mlAssistConfiguration": + l.MlAssistConfiguration, err = unmarshalMLAssistConfigurationClassification(val) + delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &l.NotificationSetting) + delete(rawMsg, key) + case "progressMetrics": + err = unpopulate(val, "ProgressMetrics", &l.ProgressMetrics) + delete(rawMsg, key) + case "projectId": + err = unpopulate(val, "ProjectID", &l.ProjectID) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &l.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &l.ProvisioningState) + delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &l.SecretsConfiguration) + delete(rawMsg, key) + case "services": + err = unpopulate(val, "Services", &l.Services) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &l.Status) + delete(rawMsg, key) + case "statusMessages": + err = unpopulate(val, "StatusMessages", &l.StatusMessages) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &l.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJobResourceArmPaginatedResult. +func (l LabelingJobResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobResourceArmPaginatedResult. 
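+
+// Editor's note (illustrative sketch, not generated code): LabelingJobProperties serializes
+// createdDateTime through the populateTimeRFC3339/unpopulateTimeRFC3339 helpers, so the wire
+// value is an RFC 3339 string. The sample payload below is an assumption for illustration only.
+func exampleLabelingJobCreatedTime() error {
+    payload := []byte(`{"jobType":"Labeling","createdDateTime":"2023-06-14T01:02:03Z"}`)
+    var p LabelingJobProperties
+    if err := json.Unmarshal(payload, &p); err != nil {
+        return err
+    }
+    fmt.Println(p.CreatedDateTime != nil) // true: parsed into a time value by unpopulateTimeRFC3339
+    return nil
+}
+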
+func (l *LabelingJobResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LabelingJobTextProperties. +func (l LabelingJobTextProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "annotationType", l.AnnotationType) + objectMap["mediaType"] = MediaTypeText + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobTextProperties. +func (l *LabelingJobTextProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "annotationType": + err = unpopulate(val, "AnnotationType", &l.AnnotationType) + delete(rawMsg, key) + case "mediaType": + err = unpopulate(val, "MediaType", &l.MediaType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LakeHouseArtifact. +func (l LakeHouseArtifact) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "artifactName", l.ArtifactName) + objectMap["artifactType"] = OneLakeArtifactTypeLakeHouse + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LakeHouseArtifact. +func (l *LakeHouseArtifact) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "artifactName": + err = unpopulate(val, "ArtifactName", &l.ArtifactName) + delete(rawMsg, key) + case "artifactType": + err = unpopulate(val, "ArtifactType", &l.ArtifactType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListAmlUserFeatureResult. +func (l ListAmlUserFeatureResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListAmlUserFeatureResult. 
+func (l *ListAmlUserFeatureResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListNotebookKeysResult. +func (l ListNotebookKeysResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "primaryAccessKey", l.PrimaryAccessKey) + populate(objectMap, "secondaryAccessKey", l.SecondaryAccessKey) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListNotebookKeysResult. +func (l *ListNotebookKeysResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "primaryAccessKey": + err = unpopulate(val, "PrimaryAccessKey", &l.PrimaryAccessKey) + delete(rawMsg, key) + case "secondaryAccessKey": + err = unpopulate(val, "SecondaryAccessKey", &l.SecondaryAccessKey) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListStorageAccountKeysResult. +func (l ListStorageAccountKeysResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "userStorageKey", l.UserStorageKey) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListStorageAccountKeysResult. +func (l *ListStorageAccountKeysResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "userStorageKey": + err = unpopulate(val, "UserStorageKey", &l.UserStorageKey) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListUsagesResult. +func (l ListUsagesResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListUsagesResult. 
+func (l *ListUsagesResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListWorkspaceKeysResult. +func (l ListWorkspaceKeysResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "appInsightsInstrumentationKey", l.AppInsightsInstrumentationKey) + populate(objectMap, "containerRegistryCredentials", l.ContainerRegistryCredentials) + populate(objectMap, "notebookAccessKeys", l.NotebookAccessKeys) + populate(objectMap, "userStorageArmId", l.UserStorageArmID) + populate(objectMap, "userStorageKey", l.UserStorageKey) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListWorkspaceKeysResult. +func (l *ListWorkspaceKeysResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "appInsightsInstrumentationKey": + err = unpopulate(val, "AppInsightsInstrumentationKey", &l.AppInsightsInstrumentationKey) + delete(rawMsg, key) + case "containerRegistryCredentials": + err = unpopulate(val, "ContainerRegistryCredentials", &l.ContainerRegistryCredentials) + delete(rawMsg, key) + case "notebookAccessKeys": + err = unpopulate(val, "NotebookAccessKeys", &l.NotebookAccessKeys) + delete(rawMsg, key) + case "userStorageArmId": + err = unpopulate(val, "UserStorageArmID", &l.UserStorageArmID) + delete(rawMsg, key) + case "userStorageKey": + err = unpopulate(val, "UserStorageKey", &l.UserStorageKey) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListWorkspaceQuotas. +func (l ListWorkspaceQuotas) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListWorkspaceQuotas. +func (l *ListWorkspaceQuotas) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LiteralJobInput. 
+func (l LiteralJobInput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", l.Description) + objectMap["jobInputType"] = JobInputTypeLiteral + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LiteralJobInput. +func (l *LiteralJobInput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &l.Description) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &l.JobInputType) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLAssistConfiguration. +func (m MLAssistConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mlAssist"] = m.MlAssist + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLAssistConfiguration. +func (m *MLAssistConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mlAssist": + err = unpopulate(val, "MlAssist", &m.MlAssist) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLAssistConfigurationDisabled. +func (m MLAssistConfigurationDisabled) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mlAssist"] = MLAssistConfigurationTypeDisabled + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLAssistConfigurationDisabled. +func (m *MLAssistConfigurationDisabled) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mlAssist": + err = unpopulate(val, "MlAssist", &m.MlAssist) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLAssistConfigurationEnabled. +func (m MLAssistConfigurationEnabled) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "inferencingComputeBinding", m.InferencingComputeBinding) + objectMap["mlAssist"] = MLAssistConfigurationTypeEnabled + populate(objectMap, "trainingComputeBinding", m.TrainingComputeBinding) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLAssistConfigurationEnabled. 
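+
+// Editor's note (illustrative sketch, not generated code): LiteralJobInput.MarshalJSON above fixes
+// the "jobInputType" discriminator itself, so a caller only sets the payload fields. The value
+// below and the helper name are assumptions for illustration only.
+func exampleLiteralJobInputMarshal() ([]byte, error) {
+    v := "42"
+    in := LiteralJobInput{Value: &v}
+    return json.Marshal(in) // emits the literal jobInputType discriminator alongside "value":"42"
+}
+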
+func (m *MLAssistConfigurationEnabled) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "inferencingComputeBinding": + err = unpopulate(val, "InferencingComputeBinding", &m.InferencingComputeBinding) + delete(rawMsg, key) + case "mlAssist": + err = unpopulate(val, "MlAssist", &m.MlAssist) + delete(rawMsg, key) + case "trainingComputeBinding": + err = unpopulate(val, "TrainingComputeBinding", &m.TrainingComputeBinding) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLFlowModelJobInput. +func (m MLFlowModelJobInput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", m.Description) + objectMap["jobInputType"] = JobInputTypeMlflowModel + populate(objectMap, "mode", m.Mode) + populate(objectMap, "uri", m.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLFlowModelJobInput. +func (m *MLFlowModelJobInput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &m.JobInputType) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &m.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLFlowModelJobOutput. +func (m MLFlowModelJobOutput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "assetName", m.AssetName) + populate(objectMap, "assetVersion", m.AssetVersion) + populate(objectMap, "autoDeleteSetting", m.AutoDeleteSetting) + populate(objectMap, "description", m.Description) + objectMap["jobOutputType"] = JobOutputTypeMlflowModel + populate(objectMap, "mode", m.Mode) + populate(objectMap, "uri", m.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLFlowModelJobOutput. 
+func (m *MLFlowModelJobOutput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "assetName": + err = unpopulate(val, "AssetName", &m.AssetName) + delete(rawMsg, key) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &m.AssetVersion) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &m.AutoDeleteSetting) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "jobOutputType": + err = unpopulate(val, "JobOutputType", &m.JobOutputType) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &m.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLTableData. +func (m MLTableData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", m.AutoDeleteSetting) + objectMap["dataType"] = DataTypeMltable + populate(objectMap, "dataUri", m.DataURI) + populate(objectMap, "description", m.Description) + populate(objectMap, "intellectualProperty", m.IntellectualProperty) + populate(objectMap, "isAnonymous", m.IsAnonymous) + populate(objectMap, "isArchived", m.IsArchived) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "referencedUris", m.ReferencedUris) + populate(objectMap, "stage", m.Stage) + populate(objectMap, "tags", m.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLTableData. +func (m *MLTableData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &m.AutoDeleteSetting) + delete(rawMsg, key) + case "dataType": + err = unpopulate(val, "DataType", &m.DataType) + delete(rawMsg, key) + case "dataUri": + err = unpopulate(val, "DataURI", &m.DataURI) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &m.IntellectualProperty) + delete(rawMsg, key) + case "isAnonymous": + err = unpopulate(val, "IsAnonymous", &m.IsAnonymous) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &m.IsArchived) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "referencedUris": + err = unpopulate(val, "ReferencedUris", &m.ReferencedUris) + delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &m.Stage) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &m.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLTableJobInput. 
+func (m MLTableJobInput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", m.Description) + objectMap["jobInputType"] = JobInputTypeMltable + populate(objectMap, "mode", m.Mode) + populate(objectMap, "uri", m.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLTableJobInput. +func (m *MLTableJobInput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &m.JobInputType) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &m.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MLTableJobOutput. +func (m MLTableJobOutput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "assetName", m.AssetName) + populate(objectMap, "assetVersion", m.AssetVersion) + populate(objectMap, "autoDeleteSetting", m.AutoDeleteSetting) + populate(objectMap, "description", m.Description) + objectMap["jobOutputType"] = JobOutputTypeMltable + populate(objectMap, "mode", m.Mode) + populate(objectMap, "uri", m.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MLTableJobOutput. +func (m *MLTableJobOutput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "assetName": + err = unpopulate(val, "AssetName", &m.AssetName) + delete(rawMsg, key) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &m.AssetVersion) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &m.AutoDeleteSetting) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "jobOutputType": + err = unpopulate(val, "JobOutputType", &m.JobOutputType) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &m.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedComputeIdentity. +func (m ManagedComputeIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["computeIdentityType"] = MonitorComputeIdentityTypeManagedIdentity + populate(objectMap, "identity", m.Identity) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedComputeIdentity. 
+func (m *ManagedComputeIdentity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeIdentityType": + err = unpopulate(val, "ComputeIdentityType", &m.ComputeIdentityType) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &m.Identity) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedIdentity. +func (m ManagedIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "clientId", m.ClientID) + objectMap["identityType"] = IdentityConfigurationTypeManaged + populate(objectMap, "objectId", m.ObjectID) + populate(objectMap, "resourceId", m.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedIdentity. +func (m *ManagedIdentity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "clientId": + err = unpopulate(val, "ClientID", &m.ClientID) + delete(rawMsg, key) + case "identityType": + err = unpopulate(val, "IdentityType", &m.IdentityType) + delete(rawMsg, key) + case "objectId": + err = unpopulate(val, "ObjectID", &m.ObjectID) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &m.ResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedIdentityAuthTypeWorkspaceConnectionProperties. +func (m ManagedIdentityAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["authType"] = ConnectionAuthTypeManagedIdentity + populate(objectMap, "category", m.Category) + populate(objectMap, "credentials", m.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", m.ExpiryTime) + populateAny(objectMap, "metadata", m.Metadata) + populate(objectMap, "target", m.Target) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedIdentityAuthTypeWorkspaceConnectionProperties. 
+func (m *ManagedIdentityAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "authType": + err = unpopulate(val, "AuthType", &m.AuthType) + delete(rawMsg, key) + case "category": + err = unpopulate(val, "Category", &m.Category) + delete(rawMsg, key) + case "credentials": + err = unpopulate(val, "Credentials", &m.Credentials) + delete(rawMsg, key) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &m.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &m.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &m.Target) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedNetworkProvisionOptions. +func (m ManagedNetworkProvisionOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "includeSpark", m.IncludeSpark) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedNetworkProvisionOptions. +func (m *ManagedNetworkProvisionOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "includeSpark": + err = unpopulate(val, "IncludeSpark", &m.IncludeSpark) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedNetworkProvisionStatus. +func (m ManagedNetworkProvisionStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "sparkReady", m.SparkReady) + populate(objectMap, "status", m.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedNetworkProvisionStatus. +func (m *ManagedNetworkProvisionStatus) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "sparkReady": + err = unpopulate(val, "SparkReady", &m.SparkReady) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &m.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedNetworkSettings. +func (m ManagedNetworkSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "isolationMode", m.IsolationMode) + populate(objectMap, "networkId", m.NetworkID) + populate(objectMap, "outboundRules", m.OutboundRules) + populate(objectMap, "status", m.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedNetworkSettings. 
+func (m *ManagedNetworkSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "isolationMode": + err = unpopulate(val, "IsolationMode", &m.IsolationMode) + delete(rawMsg, key) + case "networkId": + err = unpopulate(val, "NetworkID", &m.NetworkID) + delete(rawMsg, key) + case "outboundRules": + m.OutboundRules, err = unmarshalOutboundRuleClassificationMap(val) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &m.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedOnlineDeployment. +func (m ManagedOnlineDeployment) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "appInsightsEnabled", m.AppInsightsEnabled) + populate(objectMap, "codeConfiguration", m.CodeConfiguration) + populate(objectMap, "dataCollector", m.DataCollector) + populate(objectMap, "description", m.Description) + populate(objectMap, "egressPublicNetworkAccess", m.EgressPublicNetworkAccess) + objectMap["endpointComputeType"] = EndpointComputeTypeManaged + populate(objectMap, "environmentId", m.EnvironmentID) + populate(objectMap, "environmentVariables", m.EnvironmentVariables) + populate(objectMap, "instanceType", m.InstanceType) + populate(objectMap, "livenessProbe", m.LivenessProbe) + populate(objectMap, "model", m.Model) + populate(objectMap, "modelMountPath", m.ModelMountPath) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "provisioningState", m.ProvisioningState) + populate(objectMap, "readinessProbe", m.ReadinessProbe) + populate(objectMap, "requestSettings", m.RequestSettings) + populate(objectMap, "scaleSettings", m.ScaleSettings) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedOnlineDeployment. 
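+
+// Editor's note (illustrative sketch, not generated code): outboundRules on ManagedNetworkSettings
+// is a map of polymorphic rules, so UnmarshalJSON above defers to
+// unmarshalOutboundRuleClassificationMap, which selects each concrete rule type from its "type"
+// discriminator. The payload, the "FQDN" value, and the *FqdnOutboundRule assertion are
+// assumptions for illustration only.
+func exampleManagedNetworkOutboundRules() error {
+    payload := []byte(`{"isolationMode":"AllowOnlyApprovedOutbound","outboundRules":{"pypi":{"type":"FQDN","destination":"pypi.org"}}}`)
+    var s ManagedNetworkSettings
+    if err := json.Unmarshal(payload, &s); err != nil {
+        return err
+    }
+    if _, ok := s.OutboundRules["pypi"].(*FqdnOutboundRule); ok {
+        fmt.Println("pypi decoded as *FqdnOutboundRule")
+    }
+    return nil
+}
+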
+func (m *ManagedOnlineDeployment) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "appInsightsEnabled": + err = unpopulate(val, "AppInsightsEnabled", &m.AppInsightsEnabled) + delete(rawMsg, key) + case "codeConfiguration": + err = unpopulate(val, "CodeConfiguration", &m.CodeConfiguration) + delete(rawMsg, key) + case "dataCollector": + err = unpopulate(val, "DataCollector", &m.DataCollector) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "egressPublicNetworkAccess": + err = unpopulate(val, "EgressPublicNetworkAccess", &m.EgressPublicNetworkAccess) + delete(rawMsg, key) + case "endpointComputeType": + err = unpopulate(val, "EndpointComputeType", &m.EndpointComputeType) + delete(rawMsg, key) + case "environmentId": + err = unpopulate(val, "EnvironmentID", &m.EnvironmentID) + delete(rawMsg, key) + case "environmentVariables": + err = unpopulate(val, "EnvironmentVariables", &m.EnvironmentVariables) + delete(rawMsg, key) + case "instanceType": + err = unpopulate(val, "InstanceType", &m.InstanceType) + delete(rawMsg, key) + case "livenessProbe": + err = unpopulate(val, "LivenessProbe", &m.LivenessProbe) + delete(rawMsg, key) + case "model": + err = unpopulate(val, "Model", &m.Model) + delete(rawMsg, key) + case "modelMountPath": + err = unpopulate(val, "ModelMountPath", &m.ModelMountPath) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &m.ProvisioningState) + delete(rawMsg, key) + case "readinessProbe": + err = unpopulate(val, "ReadinessProbe", &m.ReadinessProbe) + delete(rawMsg, key) + case "requestSettings": + err = unpopulate(val, "RequestSettings", &m.RequestSettings) + delete(rawMsg, key) + case "scaleSettings": + m.ScaleSettings, err = unmarshalOnlineScaleSettingsClassification(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedServiceIdentity. +func (m ManagedServiceIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "principalId", m.PrincipalID) + populate(objectMap, "tenantId", m.TenantID) + populate(objectMap, "type", m.Type) + populate(objectMap, "userAssignedIdentities", m.UserAssignedIdentities) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedServiceIdentity. 
+func (m *ManagedServiceIdentity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "principalId": + err = unpopulate(val, "PrincipalID", &m.PrincipalID) + delete(rawMsg, key) + case "tenantId": + err = unpopulate(val, "TenantID", &m.TenantID) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &m.Type) + delete(rawMsg, key) + case "userAssignedIdentities": + err = unpopulate(val, "UserAssignedIdentities", &m.UserAssignedIdentities) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MaterializationComputeResource. +func (m MaterializationComputeResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "instanceType", m.InstanceType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MaterializationComputeResource. +func (m *MaterializationComputeResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "instanceType": + err = unpopulate(val, "InstanceType", &m.InstanceType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MaterializationSettings. +func (m MaterializationSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "notification", m.Notification) + populate(objectMap, "resource", m.Resource) + populate(objectMap, "schedule", m.Schedule) + populate(objectMap, "sparkConfiguration", m.SparkConfiguration) + populate(objectMap, "storeType", m.StoreType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MaterializationSettings. +func (m *MaterializationSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "notification": + err = unpopulate(val, "Notification", &m.Notification) + delete(rawMsg, key) + case "resource": + err = unpopulate(val, "Resource", &m.Resource) + delete(rawMsg, key) + case "schedule": + err = unpopulate(val, "Schedule", &m.Schedule) + delete(rawMsg, key) + case "sparkConfiguration": + err = unpopulate(val, "SparkConfiguration", &m.SparkConfiguration) + delete(rawMsg, key) + case "storeType": + err = unpopulate(val, "StoreType", &m.StoreType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MedianStoppingPolicy. 
+func (m MedianStoppingPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "delayEvaluation", m.DelayEvaluation) + populate(objectMap, "evaluationInterval", m.EvaluationInterval) + objectMap["policyType"] = EarlyTerminationPolicyTypeMedianStopping + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MedianStoppingPolicy. +func (m *MedianStoppingPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "delayEvaluation": + err = unpopulate(val, "DelayEvaluation", &m.DelayEvaluation) + delete(rawMsg, key) + case "evaluationInterval": + err = unpopulate(val, "EvaluationInterval", &m.EvaluationInterval) + delete(rawMsg, key) + case "policyType": + err = unpopulate(val, "PolicyType", &m.PolicyType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelConfiguration. +func (m ModelConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "mode", m.Mode) + populate(objectMap, "mountPath", m.MountPath) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelConfiguration. +func (m *ModelConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "mountPath": + err = unpopulate(val, "MountPath", &m.MountPath) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelContainer. +func (m ModelContainer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", m.ID) + populate(objectMap, "name", m.Name) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "systemData", m.SystemData) + populate(objectMap, "type", m.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelContainer. +func (m *ModelContainer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &m.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &m.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &m.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &m.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelContainerProperties. 
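The `MedianStoppingPolicy` methods above illustrate how discriminated types are handled: MarshalJSON always injects the `policyType` discriminator constant, regardless of what the struct field holds, and UnmarshalJSON reads it back through `unpopulate`. A hypothetical round trip, assuming the package is imported from the 4.x module line and that `DelayEvaluation`/`EvaluationInterval` are `*int32` as in earlier releases:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	delay, interval := int32(5), int32(1)
	p := armmachinelearning.MedianStoppingPolicy{
		DelayEvaluation:    &delay,
		EvaluationInterval: &interval,
	}
	// policyType is written by MarshalJSON even though PolicyType was never set.
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	// Expected output is roughly:
	// {"delayEvaluation":5,"evaluationInterval":1,"policyType":"MedianStopping"}
	fmt.Println(string(b))

	var out armmachinelearning.MedianStoppingPolicy
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(*out.PolicyType) // discriminator recovered on the way back in
}
```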
+func (m ModelContainerProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", m.Description) + populate(objectMap, "isArchived", m.IsArchived) + populate(objectMap, "latestVersion", m.LatestVersion) + populate(objectMap, "nextVersion", m.NextVersion) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "provisioningState", m.ProvisioningState) + populate(objectMap, "tags", m.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelContainerProperties. +func (m *ModelContainerProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &m.IsArchived) + delete(rawMsg, key) + case "latestVersion": + err = unpopulate(val, "LatestVersion", &m.LatestVersion) + delete(rawMsg, key) + case "nextVersion": + err = unpopulate(val, "NextVersion", &m.NextVersion) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &m.ProvisioningState) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &m.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelContainerResourceArmPaginatedResult. +func (m ModelContainerResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", m.NextLink) + populate(objectMap, "value", m.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelContainerResourceArmPaginatedResult. +func (m *ModelContainerResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &m.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &m.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelPackageInput. +func (m ModelPackageInput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "inputType", m.InputType) + populate(objectMap, "mode", m.Mode) + populate(objectMap, "mountPath", m.MountPath) + populate(objectMap, "path", m.Path) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelPackageInput. 
+func (m *ModelPackageInput) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "inputType": + err = unpopulate(val, "InputType", &m.InputType) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "mountPath": + err = unpopulate(val, "MountPath", &m.MountPath) + delete(rawMsg, key) + case "path": + m.Path, err = unmarshalPackageInputPathBaseClassification(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelPerformanceMetricThresholdBase. +func (m ModelPerformanceMetricThresholdBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["modelType"] = m.ModelType + populate(objectMap, "threshold", m.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelPerformanceMetricThresholdBase. +func (m *ModelPerformanceMetricThresholdBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "modelType": + err = unpopulate(val, "ModelType", &m.ModelType) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &m.Threshold) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelPerformanceSignal. +func (m ModelPerformanceSignal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dataSegment", m.DataSegment) + populate(objectMap, "metricThreshold", m.MetricThreshold) + populate(objectMap, "mode", m.Mode) + populate(objectMap, "productionData", m.ProductionData) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "referenceData", m.ReferenceData) + objectMap["signalType"] = MonitoringSignalTypeModelPerformance + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelPerformanceSignal. 
+func (m *ModelPerformanceSignal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataSegment": + err = unpopulate(val, "DataSegment", &m.DataSegment) + delete(rawMsg, key) + case "metricThreshold": + m.MetricThreshold, err = unmarshalModelPerformanceMetricThresholdBaseClassification(val) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "productionData": + m.ProductionData, err = unmarshalMonitoringInputDataBaseClassificationArray(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "referenceData": + m.ReferenceData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &m.SignalType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelVersion. +func (m ModelVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", m.ID) + populate(objectMap, "name", m.Name) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "systemData", m.SystemData) + populate(objectMap, "type", m.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelVersion. +func (m *ModelVersion) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &m.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &m.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &m.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &m.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelVersionProperties. +func (m ModelVersionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", m.AutoDeleteSetting) + populate(objectMap, "description", m.Description) + populate(objectMap, "flavors", m.Flavors) + populate(objectMap, "intellectualProperty", m.IntellectualProperty) + populate(objectMap, "isAnonymous", m.IsAnonymous) + populate(objectMap, "isArchived", m.IsArchived) + populate(objectMap, "jobName", m.JobName) + populate(objectMap, "modelType", m.ModelType) + populate(objectMap, "modelUri", m.ModelURI) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "provisioningState", m.ProvisioningState) + populate(objectMap, "stage", m.Stage) + populate(objectMap, "tags", m.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelVersionProperties. 
+func (m *ModelVersionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &m.AutoDeleteSetting) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &m.Description) + delete(rawMsg, key) + case "flavors": + err = unpopulate(val, "Flavors", &m.Flavors) + delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &m.IntellectualProperty) + delete(rawMsg, key) + case "isAnonymous": + err = unpopulate(val, "IsAnonymous", &m.IsAnonymous) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &m.IsArchived) + delete(rawMsg, key) + case "jobName": + err = unpopulate(val, "JobName", &m.JobName) + delete(rawMsg, key) + case "modelType": + err = unpopulate(val, "ModelType", &m.ModelType) + delete(rawMsg, key) + case "modelUri": + err = unpopulate(val, "ModelURI", &m.ModelURI) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &m.ProvisioningState) + delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &m.Stage) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &m.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ModelVersionResourceArmPaginatedResult. +func (m ModelVersionResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", m.NextLink) + populate(objectMap, "value", m.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ModelVersionResourceArmPaginatedResult. +func (m *ModelVersionResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &m.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &m.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitorComputeConfigurationBase. +func (m MonitorComputeConfigurationBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["computeType"] = m.ComputeType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitorComputeConfigurationBase. 
+func (m *MonitorComputeConfigurationBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeType": + err = unpopulate(val, "ComputeType", &m.ComputeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitorComputeIdentityBase. +func (m MonitorComputeIdentityBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["computeIdentityType"] = m.ComputeIdentityType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitorComputeIdentityBase. +func (m *MonitorComputeIdentityBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeIdentityType": + err = unpopulate(val, "ComputeIdentityType", &m.ComputeIdentityType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitorDefinition. +func (m MonitorDefinition) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alertNotificationSetting", m.AlertNotificationSetting) + populate(objectMap, "computeConfiguration", m.ComputeConfiguration) + populate(objectMap, "monitoringTarget", m.MonitoringTarget) + populate(objectMap, "signals", m.Signals) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitorDefinition. +func (m *MonitorDefinition) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alertNotificationSetting": + m.AlertNotificationSetting, err = unmarshalMonitoringAlertNotificationSettingsBaseClassification(val) + delete(rawMsg, key) + case "computeConfiguration": + m.ComputeConfiguration, err = unmarshalMonitorComputeConfigurationBaseClassification(val) + delete(rawMsg, key) + case "monitoringTarget": + err = unpopulate(val, "MonitoringTarget", &m.MonitoringTarget) + delete(rawMsg, key) + case "signals": + m.Signals, err = unmarshalMonitoringSignalBaseClassificationMap(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitorServerlessSparkCompute. +func (m MonitorServerlessSparkCompute) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "computeIdentity", m.ComputeIdentity) + objectMap["computeType"] = MonitorComputeTypeServerlessSpark + populate(objectMap, "instanceType", m.InstanceType) + populate(objectMap, "runtimeVersion", m.RuntimeVersion) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitorServerlessSparkCompute. 
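Fields with polymorphic types, such as `MonitorDefinition.ComputeConfiguration` above, are decoded through `unmarshal*Classification` helpers rather than `unpopulate`. Those helpers are defined elsewhere in the package (conventionally polymorphic_helpers.go) and are not part of this hunk; the sketch below shows the usual pattern, assuming the surrounding package's generated types and a `...Classification` interface name:

```go
// Assumed shape: peek at the discriminator, choose the concrete type,
// then unmarshal the full payload into it.
func unmarshalMonitorComputeConfigurationBaseClassification(rawMsg json.RawMessage) (MonitorComputeConfigurationBaseClassification, error) {
	if rawMsg == nil {
		return nil, nil
	}
	var m map[string]any
	if err := json.Unmarshal(rawMsg, &m); err != nil {
		return nil, err
	}
	var b MonitorComputeConfigurationBaseClassification
	switch m["computeType"] {
	case string(MonitorComputeTypeServerlessSpark):
		b = &MonitorServerlessSparkCompute{}
	default:
		b = &MonitorComputeConfigurationBase{}
	}
	if err := json.Unmarshal(rawMsg, b); err != nil {
		return nil, err
	}
	return b, nil
}
```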
+func (m *MonitorServerlessSparkCompute) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "computeIdentity": + m.ComputeIdentity, err = unmarshalMonitorComputeIdentityBaseClassification(val) + delete(rawMsg, key) + case "computeType": + err = unpopulate(val, "ComputeType", &m.ComputeType) + delete(rawMsg, key) + case "instanceType": + err = unpopulate(val, "InstanceType", &m.InstanceType) + delete(rawMsg, key) + case "runtimeVersion": + err = unpopulate(val, "RuntimeVersion", &m.RuntimeVersion) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringAlertNotificationSettingsBase. +func (m MonitoringAlertNotificationSettingsBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["alertNotificationType"] = m.AlertNotificationType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringAlertNotificationSettingsBase. +func (m *MonitoringAlertNotificationSettingsBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alertNotificationType": + err = unpopulate(val, "AlertNotificationType", &m.AlertNotificationType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringDataSegment. +func (m MonitoringDataSegment) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "feature", m.Feature) + populate(objectMap, "values", m.Values) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringDataSegment. +func (m *MonitoringDataSegment) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "feature": + err = unpopulate(val, "Feature", &m.Feature) + delete(rawMsg, key) + case "values": + err = unpopulate(val, "Values", &m.Values) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringFeatureFilterBase. +func (m MonitoringFeatureFilterBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["filterType"] = m.FilterType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringFeatureFilterBase. 
+func (m *MonitoringFeatureFilterBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "filterType": + err = unpopulate(val, "FilterType", &m.FilterType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringInputDataBase. +func (m MonitoringInputDataBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "columns", m.Columns) + populate(objectMap, "dataContext", m.DataContext) + objectMap["inputDataType"] = m.InputDataType + populate(objectMap, "jobInputType", m.JobInputType) + populate(objectMap, "uri", m.URI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringInputDataBase. +func (m *MonitoringInputDataBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "columns": + err = unpopulate(val, "Columns", &m.Columns) + delete(rawMsg, key) + case "dataContext": + err = unpopulate(val, "DataContext", &m.DataContext) + delete(rawMsg, key) + case "inputDataType": + err = unpopulate(val, "InputDataType", &m.InputDataType) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &m.JobInputType) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &m.URI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringSignalBase. +func (m MonitoringSignalBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "mode", m.Mode) + populate(objectMap, "properties", m.Properties) + objectMap["signalType"] = m.SignalType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringSignalBase. +func (m *MonitoringSignalBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &m.Mode) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &m.SignalType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringTarget. +func (m MonitoringTarget) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "deploymentId", m.DeploymentID) + populate(objectMap, "modelId", m.ModelID) + populate(objectMap, "taskType", m.TaskType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringTarget. 
+func (m *MonitoringTarget) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "deploymentId": + err = unpopulate(val, "DeploymentID", &m.DeploymentID) + delete(rawMsg, key) + case "modelId": + err = unpopulate(val, "ModelID", &m.ModelID) + delete(rawMsg, key) + case "taskType": + err = unpopulate(val, "TaskType", &m.TaskType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringThreshold. +func (m MonitoringThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", m.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringThreshold. +func (m *MonitoringThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &m.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitoringWorkspaceConnection. +func (m MonitoringWorkspaceConnection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "environmentVariables", m.EnvironmentVariables) + populate(objectMap, "secrets", m.Secrets) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitoringWorkspaceConnection. +func (m *MonitoringWorkspaceConnection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "environmentVariables": + err = unpopulate(val, "EnvironmentVariables", &m.EnvironmentVariables) delete(rawMsg, key) - case "amsGradient": - err = unpopulate(val, "AmsGradient", &i.AmsGradient) + case "secrets": + err = unpopulate(val, "Secrets", &m.Secrets) delete(rawMsg, key) - case "augmentations": - err = unpopulate(val, "Augmentations", &i.Augmentations) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Mpi. +func (m Mpi) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["distributionType"] = DistributionTypeMpi + populate(objectMap, "processCountPerInstance", m.ProcessCountPerInstance) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Mpi. 
+func (m *Mpi) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "distributionType": + err = unpopulate(val, "DistributionType", &m.DistributionType) delete(rawMsg, key) - case "beta1": - err = unpopulate(val, "Beta1", &i.Beta1) + case "processCountPerInstance": + err = unpopulate(val, "ProcessCountPerInstance", &m.ProcessCountPerInstance) delete(rawMsg, key) - case "beta2": - err = unpopulate(val, "Beta2", &i.Beta2) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NCrossValidations. +func (n NCrossValidations) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["mode"] = n.Mode + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NCrossValidations. +func (n *NCrossValidations) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "mode": + err = unpopulate(val, "Mode", &n.Mode) delete(rawMsg, key) - case "boxDetectionsPerImage": - err = unpopulate(val, "BoxDetectionsPerImage", &i.BoxDetectionsPerImage) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NlpFixedParameters. +func (n NlpFixedParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "gradientAccumulationSteps", n.GradientAccumulationSteps) + populate(objectMap, "learningRate", n.LearningRate) + populate(objectMap, "learningRateScheduler", n.LearningRateScheduler) + populate(objectMap, "modelName", n.ModelName) + populate(objectMap, "numberOfEpochs", n.NumberOfEpochs) + populate(objectMap, "trainingBatchSize", n.TrainingBatchSize) + populate(objectMap, "validationBatchSize", n.ValidationBatchSize) + populate(objectMap, "warmupRatio", n.WarmupRatio) + populate(objectMap, "weightDecay", n.WeightDecay) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NlpFixedParameters. 
+func (n *NlpFixedParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "gradientAccumulationSteps": + err = unpopulate(val, "GradientAccumulationSteps", &n.GradientAccumulationSteps) delete(rawMsg, key) - case "boxScoreThreshold": - err = unpopulate(val, "BoxScoreThreshold", &i.BoxScoreThreshold) + case "learningRate": + err = unpopulate(val, "LearningRate", &n.LearningRate) delete(rawMsg, key) - case "checkpointFrequency": - err = unpopulate(val, "CheckpointFrequency", &i.CheckpointFrequency) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &n.LearningRateScheduler) delete(rawMsg, key) - case "checkpointModel": - err = unpopulate(val, "CheckpointModel", &i.CheckpointModel) + case "modelName": + err = unpopulate(val, "ModelName", &n.ModelName) delete(rawMsg, key) - case "checkpointRunId": - err = unpopulate(val, "CheckpointRunID", &i.CheckpointRunID) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &n.NumberOfEpochs) delete(rawMsg, key) - case "distributed": - err = unpopulate(val, "Distributed", &i.Distributed) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &n.TrainingBatchSize) delete(rawMsg, key) - case "earlyStopping": - err = unpopulate(val, "EarlyStopping", &i.EarlyStopping) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &n.ValidationBatchSize) delete(rawMsg, key) - case "earlyStoppingDelay": - err = unpopulate(val, "EarlyStoppingDelay", &i.EarlyStoppingDelay) + case "warmupRatio": + err = unpopulate(val, "WarmupRatio", &n.WarmupRatio) delete(rawMsg, key) - case "earlyStoppingPatience": - err = unpopulate(val, "EarlyStoppingPatience", &i.EarlyStoppingPatience) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &n.WeightDecay) delete(rawMsg, key) - case "enableOnnxNormalization": - err = unpopulate(val, "EnableOnnxNormalization", &i.EnableOnnxNormalization) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NlpParameterSubspace. +func (n NlpParameterSubspace) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "gradientAccumulationSteps", n.GradientAccumulationSteps) + populate(objectMap, "learningRate", n.LearningRate) + populate(objectMap, "learningRateScheduler", n.LearningRateScheduler) + populate(objectMap, "modelName", n.ModelName) + populate(objectMap, "numberOfEpochs", n.NumberOfEpochs) + populate(objectMap, "trainingBatchSize", n.TrainingBatchSize) + populate(objectMap, "validationBatchSize", n.ValidationBatchSize) + populate(objectMap, "warmupRatio", n.WarmupRatio) + populate(objectMap, "weightDecay", n.WeightDecay) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NlpParameterSubspace. 
+func (n *NlpParameterSubspace) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "gradientAccumulationSteps": + err = unpopulate(val, "GradientAccumulationSteps", &n.GradientAccumulationSteps) delete(rawMsg, key) - case "evaluationFrequency": - err = unpopulate(val, "EvaluationFrequency", &i.EvaluationFrequency) + case "learningRate": + err = unpopulate(val, "LearningRate", &n.LearningRate) delete(rawMsg, key) - case "gradientAccumulationStep": - err = unpopulate(val, "GradientAccumulationStep", &i.GradientAccumulationStep) + case "learningRateScheduler": + err = unpopulate(val, "LearningRateScheduler", &n.LearningRateScheduler) delete(rawMsg, key) - case "imageSize": - err = unpopulate(val, "ImageSize", &i.ImageSize) + case "modelName": + err = unpopulate(val, "ModelName", &n.ModelName) + delete(rawMsg, key) + case "numberOfEpochs": + err = unpopulate(val, "NumberOfEpochs", &n.NumberOfEpochs) + delete(rawMsg, key) + case "trainingBatchSize": + err = unpopulate(val, "TrainingBatchSize", &n.TrainingBatchSize) + delete(rawMsg, key) + case "validationBatchSize": + err = unpopulate(val, "ValidationBatchSize", &n.ValidationBatchSize) + delete(rawMsg, key) + case "warmupRatio": + err = unpopulate(val, "WarmupRatio", &n.WarmupRatio) + delete(rawMsg, key) + case "weightDecay": + err = unpopulate(val, "WeightDecay", &n.WeightDecay) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NlpSweepSettings. +func (n NlpSweepSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "earlyTermination", n.EarlyTermination) + populate(objectMap, "samplingAlgorithm", n.SamplingAlgorithm) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NlpSweepSettings. +func (n *NlpSweepSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "earlyTermination": + n.EarlyTermination, err = unmarshalEarlyTerminationPolicyClassification(val) + delete(rawMsg, key) + case "samplingAlgorithm": + err = unpopulate(val, "SamplingAlgorithm", &n.SamplingAlgorithm) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NlpVertical. +func (n NlpVertical) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "featurizationSettings", n.FeaturizationSettings) + populate(objectMap, "fixedParameters", n.FixedParameters) + populate(objectMap, "limitSettings", n.LimitSettings) + populate(objectMap, "searchSpace", n.SearchSpace) + populate(objectMap, "sweepSettings", n.SweepSettings) + populate(objectMap, "validationData", n.ValidationData) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NlpVertical. 
+func (n *NlpVertical) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "featurizationSettings": + err = unpopulate(val, "FeaturizationSettings", &n.FeaturizationSettings) + delete(rawMsg, key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &n.FixedParameters) + delete(rawMsg, key) + case "limitSettings": + err = unpopulate(val, "LimitSettings", &n.LimitSettings) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &n.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &n.SweepSettings) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &n.ValidationData) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NlpVerticalFeaturizationSettings. +func (n NlpVerticalFeaturizationSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "datasetLanguage", n.DatasetLanguage) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NlpVerticalFeaturizationSettings. +func (n *NlpVerticalFeaturizationSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "datasetLanguage": + err = unpopulate(val, "DatasetLanguage", &n.DatasetLanguage) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NlpVerticalLimitSettings. +func (n NlpVerticalLimitSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "maxConcurrentTrials", n.MaxConcurrentTrials) + populate(objectMap, "maxNodes", n.MaxNodes) + populate(objectMap, "maxTrials", n.MaxTrials) + populate(objectMap, "timeout", n.Timeout) + populate(objectMap, "trialTimeout", n.TrialTimeout) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NlpVerticalLimitSettings. 
+func (n *NlpVerticalLimitSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "maxConcurrentTrials": + err = unpopulate(val, "MaxConcurrentTrials", &n.MaxConcurrentTrials) delete(rawMsg, key) - case "layersToFreeze": - err = unpopulate(val, "LayersToFreeze", &i.LayersToFreeze) + case "maxNodes": + err = unpopulate(val, "MaxNodes", &n.MaxNodes) delete(rawMsg, key) - case "learningRate": - err = unpopulate(val, "LearningRate", &i.LearningRate) + case "maxTrials": + err = unpopulate(val, "MaxTrials", &n.MaxTrials) delete(rawMsg, key) - case "learningRateScheduler": - err = unpopulate(val, "LearningRateScheduler", &i.LearningRateScheduler) + case "timeout": + err = unpopulate(val, "Timeout", &n.Timeout) delete(rawMsg, key) - case "maxSize": - err = unpopulate(val, "MaxSize", &i.MaxSize) + case "trialTimeout": + err = unpopulate(val, "TrialTimeout", &n.TrialTimeout) delete(rawMsg, key) - case "minSize": - err = unpopulate(val, "MinSize", &i.MinSize) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NodeStateCounts. +func (n NodeStateCounts) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "idleNodeCount", n.IdleNodeCount) + populate(objectMap, "leavingNodeCount", n.LeavingNodeCount) + populate(objectMap, "preemptedNodeCount", n.PreemptedNodeCount) + populate(objectMap, "preparingNodeCount", n.PreparingNodeCount) + populate(objectMap, "runningNodeCount", n.RunningNodeCount) + populate(objectMap, "unusableNodeCount", n.UnusableNodeCount) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NodeStateCounts. +func (n *NodeStateCounts) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "idleNodeCount": + err = unpopulate(val, "IdleNodeCount", &n.IdleNodeCount) delete(rawMsg, key) - case "modelName": - err = unpopulate(val, "ModelName", &i.ModelName) + case "leavingNodeCount": + err = unpopulate(val, "LeavingNodeCount", &n.LeavingNodeCount) delete(rawMsg, key) - case "modelSize": - err = unpopulate(val, "ModelSize", &i.ModelSize) + case "preemptedNodeCount": + err = unpopulate(val, "PreemptedNodeCount", &n.PreemptedNodeCount) delete(rawMsg, key) - case "momentum": - err = unpopulate(val, "Momentum", &i.Momentum) + case "preparingNodeCount": + err = unpopulate(val, "PreparingNodeCount", &n.PreparingNodeCount) delete(rawMsg, key) - case "multiScale": - err = unpopulate(val, "MultiScale", &i.MultiScale) + case "runningNodeCount": + err = unpopulate(val, "RunningNodeCount", &n.RunningNodeCount) delete(rawMsg, key) - case "nesterov": - err = unpopulate(val, "Nesterov", &i.Nesterov) + case "unusableNodeCount": + err = unpopulate(val, "UnusableNodeCount", &n.UnusableNodeCount) delete(rawMsg, key) - case "nmsIouThreshold": - err = unpopulate(val, "NmsIouThreshold", &i.NmsIouThreshold) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Nodes. 
+func (n Nodes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["nodesValueType"] = n.NodesValueType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Nodes. +func (n *Nodes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nodesValueType": + err = unpopulate(val, "NodesValueType", &n.NodesValueType) delete(rawMsg, key) - case "numberOfEpochs": - err = unpopulate(val, "NumberOfEpochs", &i.NumberOfEpochs) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NoneAuthTypeWorkspaceConnectionProperties. +func (n NoneAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["authType"] = ConnectionAuthTypeNone + populate(objectMap, "category", n.Category) + populateTimeRFC3339(objectMap, "expiryTime", n.ExpiryTime) + populateAny(objectMap, "metadata", n.Metadata) + populate(objectMap, "target", n.Target) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NoneAuthTypeWorkspaceConnectionProperties. +func (n *NoneAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "authType": + err = unpopulate(val, "AuthType", &n.AuthType) delete(rawMsg, key) - case "numberOfWorkers": - err = unpopulate(val, "NumberOfWorkers", &i.NumberOfWorkers) + case "category": + err = unpopulate(val, "Category", &n.Category) delete(rawMsg, key) - case "optimizer": - err = unpopulate(val, "Optimizer", &i.Optimizer) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &n.ExpiryTime) delete(rawMsg, key) - case "randomSeed": - err = unpopulate(val, "RandomSeed", &i.RandomSeed) + case "metadata": + err = unpopulate(val, "Metadata", &n.Metadata) delete(rawMsg, key) - case "stepLRGamma": - err = unpopulate(val, "StepLRGamma", &i.StepLRGamma) + case "target": + err = unpopulate(val, "Target", &n.Target) delete(rawMsg, key) - case "stepLRStepSize": - err = unpopulate(val, "StepLRStepSize", &i.StepLRStepSize) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NoneDatastoreCredentials. +func (n NoneDatastoreCredentials) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["credentialsType"] = CredentialsTypeNone + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NoneDatastoreCredentials. 
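`NoneAuthTypeWorkspaceConnectionProperties` above routes its `expiryTime` field through `populateTimeRFC3339`/`unpopulateTimeRFC3339` instead of the plain helpers, because ARM timestamps are serialized as RFC 3339 strings rather than Go's default time encoding. A sketch of how those helpers are commonly generated; the wrapper type name and exact formatting are assumptions, not the verbatim SDK code:

```go
package armmachinelearning

import (
	"encoding/json"
	"fmt"
	"time"
)

// timeRFC3339 wraps time.Time so Marshal/Unmarshal use RFC 3339 text.
type timeRFC3339 time.Time

func (t timeRFC3339) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%q", time.Time(t).Format(time.RFC3339Nano))), nil
}

func (t *timeRFC3339) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	parsed, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		return err
	}
	*t = timeRFC3339(parsed)
	return nil
}

func populateTimeRFC3339(m map[string]any, k string, t *time.Time) {
	if t == nil {
		return
	}
	m[k] = (*timeRFC3339)(t)
}

func unpopulateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error {
	if data == nil {
		return nil
	}
	var aux timeRFC3339
	if err := json.Unmarshal(data, &aux); err != nil {
		return fmt.Errorf("struct field %s: %v", fn, err)
	}
	tt := time.Time(aux)
	*t = &tt
	return nil
}
```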
+func (n *NoneDatastoreCredentials) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "credentialsType": + err = unpopulate(val, "CredentialsType", &n.CredentialsType) delete(rawMsg, key) - case "tileGridSize": - err = unpopulate(val, "TileGridSize", &i.TileGridSize) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NotebookAccessTokenResult. +func (n NotebookAccessTokenResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "accessToken", n.AccessToken) + populate(objectMap, "expiresIn", n.ExpiresIn) + populate(objectMap, "hostName", n.HostName) + populate(objectMap, "notebookResourceId", n.NotebookResourceID) + populate(objectMap, "publicDns", n.PublicDNS) + populate(objectMap, "refreshToken", n.RefreshToken) + populate(objectMap, "scope", n.Scope) + populate(objectMap, "tokenType", n.TokenType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NotebookAccessTokenResult. +func (n *NotebookAccessTokenResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "accessToken": + err = unpopulate(val, "AccessToken", &n.AccessToken) delete(rawMsg, key) - case "tileOverlapRatio": - err = unpopulate(val, "TileOverlapRatio", &i.TileOverlapRatio) + case "expiresIn": + err = unpopulate(val, "ExpiresIn", &n.ExpiresIn) delete(rawMsg, key) - case "tilePredictionsNmsThreshold": - err = unpopulate(val, "TilePredictionsNmsThreshold", &i.TilePredictionsNmsThreshold) + case "hostName": + err = unpopulate(val, "HostName", &n.HostName) delete(rawMsg, key) - case "trainingBatchSize": - err = unpopulate(val, "TrainingBatchSize", &i.TrainingBatchSize) + case "notebookResourceId": + err = unpopulate(val, "NotebookResourceID", &n.NotebookResourceID) delete(rawMsg, key) - case "validationBatchSize": - err = unpopulate(val, "ValidationBatchSize", &i.ValidationBatchSize) + case "publicDns": + err = unpopulate(val, "PublicDNS", &n.PublicDNS) delete(rawMsg, key) - case "validationIouThreshold": - err = unpopulate(val, "ValidationIouThreshold", &i.ValidationIouThreshold) + case "refreshToken": + err = unpopulate(val, "RefreshToken", &n.RefreshToken) delete(rawMsg, key) - case "validationMetricType": - err = unpopulate(val, "ValidationMetricType", &i.ValidationMetricType) + case "scope": + err = unpopulate(val, "Scope", &n.Scope) delete(rawMsg, key) - case "warmupCosineLRCycles": - err = unpopulate(val, "WarmupCosineLRCycles", &i.WarmupCosineLRCycles) + case "tokenType": + err = unpopulate(val, "TokenType", &n.TokenType) delete(rawMsg, key) - case "warmupCosineLRWarmupEpochs": - err = unpopulate(val, "WarmupCosineLRWarmupEpochs", &i.WarmupCosineLRWarmupEpochs) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NotebookPreparationError. 
+func (n NotebookPreparationError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "errorMessage", n.ErrorMessage) + populate(objectMap, "statusCode", n.StatusCode) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NotebookPreparationError. +func (n *NotebookPreparationError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "errorMessage": + err = unpopulate(val, "ErrorMessage", &n.ErrorMessage) delete(rawMsg, key) - case "weightDecay": - err = unpopulate(val, "WeightDecay", &i.WeightDecay) + case "statusCode": + err = unpopulate(val, "StatusCode", &n.StatusCode) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageObjectDetection. -func (i ImageObjectDetection) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type NotebookResourceInfo. +func (n NotebookResourceInfo) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "logVerbosity", i.LogVerbosity) - populate(objectMap, "modelSettings", i.ModelSettings) - populate(objectMap, "primaryMetric", i.PrimaryMetric) - populate(objectMap, "searchSpace", i.SearchSpace) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "targetColumnName", i.TargetColumnName) - objectMap["taskType"] = TaskTypeImageObjectDetection - populate(objectMap, "trainingData", i.TrainingData) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + populate(objectMap, "fqdn", n.Fqdn) + populate(objectMap, "isPrivateLinkEnabled", n.IsPrivateLinkEnabled) + populate(objectMap, "notebookPreparationError", n.NotebookPreparationError) + populate(objectMap, "resourceId", n.ResourceID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageObjectDetection. -func (i *ImageObjectDetection) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type NotebookResourceInfo. 
+func (n *NotebookResourceInfo) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) - delete(rawMsg, key) - case "logVerbosity": - err = unpopulate(val, "LogVerbosity", &i.LogVerbosity) - delete(rawMsg, key) - case "modelSettings": - err = unpopulate(val, "ModelSettings", &i.ModelSettings) - delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &i.PrimaryMetric) - delete(rawMsg, key) - case "searchSpace": - err = unpopulate(val, "SearchSpace", &i.SearchSpace) - delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) - delete(rawMsg, key) - case "targetColumnName": - err = unpopulate(val, "TargetColumnName", &i.TargetColumnName) - delete(rawMsg, key) - case "taskType": - err = unpopulate(val, "TaskType", &i.TaskType) + case "fqdn": + err = unpopulate(val, "Fqdn", &n.Fqdn) delete(rawMsg, key) - case "trainingData": - err = unpopulate(val, "TrainingData", &i.TrainingData) + case "isPrivateLinkEnabled": + err = unpopulate(val, "IsPrivateLinkEnabled", &n.IsPrivateLinkEnabled) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) + case "notebookPreparationError": + err = unpopulate(val, "NotebookPreparationError", &n.NotebookPreparationError) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "resourceId": + err = unpopulate(val, "ResourceID", &n.ResourceID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageObjectDetectionBase. -func (i ImageObjectDetectionBase) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type NotificationSetting. +func (n NotificationSetting) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "modelSettings", i.ModelSettings) - populate(objectMap, "searchSpace", i.SearchSpace) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + populate(objectMap, "emailOn", n.EmailOn) + populate(objectMap, "emails", n.Emails) + populate(objectMap, "webhooks", n.Webhooks) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageObjectDetectionBase. -func (i *ImageObjectDetectionBase) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type NotificationSetting. 
+func (n *NotificationSetting) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) + case "emailOn": + err = unpopulate(val, "EmailOn", &n.EmailOn) delete(rawMsg, key) - case "modelSettings": - err = unpopulate(val, "ModelSettings", &i.ModelSettings) + case "emails": + err = unpopulate(val, "Emails", &n.Emails) delete(rawMsg, key) - case "searchSpace": - err = unpopulate(val, "SearchSpace", &i.SearchSpace) + case "webhooks": + n.Webhooks, err = unmarshalWebhookClassificationMap(val) delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NumericalDataDriftMetricThreshold. +func (n NumericalDataDriftMetricThreshold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["dataType"] = MonitoringFeatureDataTypeNumerical + populate(objectMap, "metric", n.Metric) + populate(objectMap, "threshold", n.Threshold) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NumericalDataDriftMetricThreshold. +func (n *NumericalDataDriftMetricThreshold) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataType": + err = unpopulate(val, "DataType", &n.DataType) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) + case "metric": + err = unpopulate(val, "Metric", &n.Metric) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "threshold": + err = unpopulate(val, "Threshold", &n.Threshold) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageSweepSettings. -func (i ImageSweepSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type NumericalDataQualityMetricThreshold. +func (n NumericalDataQualityMetricThreshold) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "earlyTermination", i.EarlyTermination) - populate(objectMap, "samplingAlgorithm", i.SamplingAlgorithm) + objectMap["dataType"] = MonitoringFeatureDataTypeNumerical + populate(objectMap, "metric", n.Metric) + populate(objectMap, "threshold", n.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageSweepSettings. -func (i *ImageSweepSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type NumericalDataQualityMetricThreshold. 
+func (n *NumericalDataQualityMetricThreshold) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } for key, val := range rawMsg { var err error switch key { - case "earlyTermination": - i.EarlyTermination, err = unmarshalEarlyTerminationPolicyClassification(val) + case "dataType": + err = unpopulate(val, "DataType", &n.DataType) delete(rawMsg, key) - case "samplingAlgorithm": - err = unpopulate(val, "SamplingAlgorithm", &i.SamplingAlgorithm) + case "metric": + err = unpopulate(val, "Metric", &n.Metric) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &n.Threshold) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageVertical. -func (i ImageVertical) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type NumericalPredictionDriftMetricThreshold. +func (n NumericalPredictionDriftMetricThreshold) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limitSettings", i.LimitSettings) - populate(objectMap, "sweepSettings", i.SweepSettings) - populate(objectMap, "validationData", i.ValidationData) - populate(objectMap, "validationDataSize", i.ValidationDataSize) + objectMap["dataType"] = MonitoringFeatureDataTypeNumerical + populate(objectMap, "metric", n.Metric) + populate(objectMap, "threshold", n.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageVertical. -func (i *ImageVertical) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type NumericalPredictionDriftMetricThreshold. +func (n *NumericalPredictionDriftMetricThreshold) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } for key, val := range rawMsg { var err error switch key { - case "limitSettings": - err = unpopulate(val, "LimitSettings", &i.LimitSettings) - delete(rawMsg, key) - case "sweepSettings": - err = unpopulate(val, "SweepSettings", &i.SweepSettings) + case "dataType": + err = unpopulate(val, "DataType", &n.DataType) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &i.ValidationData) + case "metric": + err = unpopulate(val, "Metric", &n.Metric) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &i.ValidationDataSize) + case "threshold": + err = unpopulate(val, "Threshold", &n.Threshold) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", n, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type InferenceContainerProperties. -func (i InferenceContainerProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Objective. 
+func (o Objective) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "livenessRoute", i.LivenessRoute) - populate(objectMap, "readinessRoute", i.ReadinessRoute) - populate(objectMap, "scoringRoute", i.ScoringRoute) + populate(objectMap, "goal", o.Goal) + populate(objectMap, "primaryMetric", o.PrimaryMetric) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type InferenceContainerProperties. -func (i *InferenceContainerProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Objective. +func (o *Objective) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "livenessRoute": - err = unpopulate(val, "LivenessRoute", &i.LivenessRoute) - delete(rawMsg, key) - case "readinessRoute": - err = unpopulate(val, "ReadinessRoute", &i.ReadinessRoute) + case "goal": + err = unpopulate(val, "Goal", &o.Goal) delete(rawMsg, key) - case "scoringRoute": - err = unpopulate(val, "ScoringRoute", &i.ScoringRoute) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &o.PrimaryMetric) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type InstanceTypeSchema. -func (i InstanceTypeSchema) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OneLakeArtifact. +func (o OneLakeArtifact) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nodeSelector", i.NodeSelector) - populate(objectMap, "resources", i.Resources) + populate(objectMap, "artifactName", o.ArtifactName) + objectMap["artifactType"] = o.ArtifactType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type InstanceTypeSchema. -func (i *InstanceTypeSchema) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OneLakeArtifact. +func (o *OneLakeArtifact) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "nodeSelector": - err = unpopulate(val, "NodeSelector", &i.NodeSelector) + case "artifactName": + err = unpopulate(val, "ArtifactName", &o.ArtifactName) delete(rawMsg, key) - case "resources": - err = unpopulate(val, "Resources", &i.Resources) + case "artifactType": + err = unpopulate(val, "ArtifactType", &o.ArtifactType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type InstanceTypeSchemaResources. -func (i InstanceTypeSchemaResources) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OneLakeDatastore. 
+func (o OneLakeDatastore) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "limits", i.Limits) - populate(objectMap, "requests", i.Requests) + populate(objectMap, "artifact", o.Artifact) + populate(objectMap, "credentials", o.Credentials) + objectMap["datastoreType"] = DatastoreTypeOneLake + populate(objectMap, "description", o.Description) + populate(objectMap, "endpoint", o.Endpoint) + populate(objectMap, "intellectualProperty", o.IntellectualProperty) + populate(objectMap, "isDefault", o.IsDefault) + populate(objectMap, "oneLakeWorkspaceName", o.OneLakeWorkspaceName) + populate(objectMap, "properties", o.Properties) + populate(objectMap, "serviceDataAccessAuthIdentity", o.ServiceDataAccessAuthIdentity) + populate(objectMap, "tags", o.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type InstanceTypeSchemaResources. -func (i *InstanceTypeSchemaResources) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OneLakeDatastore. +func (o *OneLakeDatastore) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "limits": - err = unpopulate(val, "Limits", &i.Limits) + case "artifact": + o.Artifact, err = unmarshalOneLakeArtifactClassification(val) delete(rawMsg, key) - case "requests": - err = unpopulate(val, "Requests", &i.Requests) + case "credentials": + o.Credentials, err = unmarshalDatastoreCredentialsClassification(val) + delete(rawMsg, key) + case "datastoreType": + err = unpopulate(val, "DatastoreType", &o.DatastoreType) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &o.Description) + delete(rawMsg, key) + case "endpoint": + err = unpopulate(val, "Endpoint", &o.Endpoint) + delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &o.IntellectualProperty) + delete(rawMsg, key) + case "isDefault": + err = unpopulate(val, "IsDefault", &o.IsDefault) + delete(rawMsg, key) + case "oneLakeWorkspaceName": + err = unpopulate(val, "OneLakeWorkspaceName", &o.OneLakeWorkspaceName) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &o.Properties) + delete(rawMsg, key) + case "serviceDataAccessAuthIdentity": + err = unpopulate(val, "ServiceDataAccessAuthIdentity", &o.ServiceDataAccessAuthIdentity) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &o.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobBase. -func (j JobBase) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineDeployment. 
+func (o OnlineDeployment) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", j.ID) - populate(objectMap, "name", j.Name) - populate(objectMap, "properties", j.Properties) - populate(objectMap, "systemData", j.SystemData) - populate(objectMap, "type", j.Type) + populate(objectMap, "id", o.ID) + populate(objectMap, "identity", o.Identity) + populate(objectMap, "kind", o.Kind) + populate(objectMap, "location", o.Location) + populate(objectMap, "name", o.Name) + populate(objectMap, "properties", o.Properties) + populate(objectMap, "sku", o.SKU) + populate(objectMap, "systemData", o.SystemData) + populate(objectMap, "tags", o.Tags) + populate(objectMap, "type", o.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobBase. -func (j *JobBase) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineDeployment. +func (o *OnlineDeployment) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { case "id": - err = unpopulate(val, "ID", &j.ID) + err = unpopulate(val, "ID", &o.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &o.Identity) + delete(rawMsg, key) + case "kind": + err = unpopulate(val, "Kind", &o.Kind) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &o.Location) delete(rawMsg, key) case "name": - err = unpopulate(val, "Name", &j.Name) + err = unpopulate(val, "Name", &o.Name) delete(rawMsg, key) case "properties": - j.Properties, err = unmarshalJobBasePropertiesClassification(val) + o.Properties, err = unmarshalOnlineDeploymentPropertiesClassification(val) + delete(rawMsg, key) + case "sku": + err = unpopulate(val, "SKU", &o.SKU) delete(rawMsg, key) case "systemData": - err = unpopulate(val, "SystemData", &j.SystemData) + err = unpopulate(val, "SystemData", &o.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &o.Tags) delete(rawMsg, key) case "type": - err = unpopulate(val, "Type", &j.Type) + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobBaseProperties. -func (j JobBaseProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineDeploymentProperties. 
+func (o OnlineDeploymentProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "componentId", j.ComponentID) - populate(objectMap, "computeId", j.ComputeID) - populate(objectMap, "description", j.Description) - populate(objectMap, "displayName", j.DisplayName) - populate(objectMap, "experimentName", j.ExperimentName) - populate(objectMap, "identity", j.Identity) - populate(objectMap, "isArchived", j.IsArchived) - objectMap["jobType"] = j.JobType - populate(objectMap, "properties", j.Properties) - populate(objectMap, "services", j.Services) - populate(objectMap, "status", j.Status) - populate(objectMap, "tags", j.Tags) + populate(objectMap, "appInsightsEnabled", o.AppInsightsEnabled) + populate(objectMap, "codeConfiguration", o.CodeConfiguration) + populate(objectMap, "dataCollector", o.DataCollector) + populate(objectMap, "description", o.Description) + populate(objectMap, "egressPublicNetworkAccess", o.EgressPublicNetworkAccess) + objectMap["endpointComputeType"] = o.EndpointComputeType + populate(objectMap, "environmentId", o.EnvironmentID) + populate(objectMap, "environmentVariables", o.EnvironmentVariables) + populate(objectMap, "instanceType", o.InstanceType) + populate(objectMap, "livenessProbe", o.LivenessProbe) + populate(objectMap, "model", o.Model) + populate(objectMap, "modelMountPath", o.ModelMountPath) + populate(objectMap, "properties", o.Properties) + populate(objectMap, "provisioningState", o.ProvisioningState) + populate(objectMap, "readinessProbe", o.ReadinessProbe) + populate(objectMap, "requestSettings", o.RequestSettings) + populate(objectMap, "scaleSettings", o.ScaleSettings) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobBaseProperties. -func (j *JobBaseProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineDeploymentProperties. 
+func (o *OnlineDeploymentProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "componentId": - err = unpopulate(val, "ComponentID", &j.ComponentID) + case "appInsightsEnabled": + err = unpopulate(val, "AppInsightsEnabled", &o.AppInsightsEnabled) delete(rawMsg, key) - case "computeId": - err = unpopulate(val, "ComputeID", &j.ComputeID) + case "codeConfiguration": + err = unpopulate(val, "CodeConfiguration", &o.CodeConfiguration) + delete(rawMsg, key) + case "dataCollector": + err = unpopulate(val, "DataCollector", &o.DataCollector) delete(rawMsg, key) case "description": - err = unpopulate(val, "Description", &j.Description) + err = unpopulate(val, "Description", &o.Description) delete(rawMsg, key) - case "displayName": - err = unpopulate(val, "DisplayName", &j.DisplayName) + case "egressPublicNetworkAccess": + err = unpopulate(val, "EgressPublicNetworkAccess", &o.EgressPublicNetworkAccess) delete(rawMsg, key) - case "experimentName": - err = unpopulate(val, "ExperimentName", &j.ExperimentName) + case "endpointComputeType": + err = unpopulate(val, "EndpointComputeType", &o.EndpointComputeType) delete(rawMsg, key) - case "identity": - j.Identity, err = unmarshalIdentityConfigurationClassification(val) + case "environmentId": + err = unpopulate(val, "EnvironmentID", &o.EnvironmentID) delete(rawMsg, key) - case "isArchived": - err = unpopulate(val, "IsArchived", &j.IsArchived) + case "environmentVariables": + err = unpopulate(val, "EnvironmentVariables", &o.EnvironmentVariables) delete(rawMsg, key) - case "jobType": - err = unpopulate(val, "JobType", &j.JobType) + case "instanceType": + err = unpopulate(val, "InstanceType", &o.InstanceType) + delete(rawMsg, key) + case "livenessProbe": + err = unpopulate(val, "LivenessProbe", &o.LivenessProbe) + delete(rawMsg, key) + case "model": + err = unpopulate(val, "Model", &o.Model) + delete(rawMsg, key) + case "modelMountPath": + err = unpopulate(val, "ModelMountPath", &o.ModelMountPath) delete(rawMsg, key) case "properties": - err = unpopulate(val, "Properties", &j.Properties) + err = unpopulate(val, "Properties", &o.Properties) delete(rawMsg, key) - case "services": - err = unpopulate(val, "Services", &j.Services) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &o.ProvisioningState) delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &j.Status) + case "readinessProbe": + err = unpopulate(val, "ReadinessProbe", &o.ReadinessProbe) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &j.Tags) + case "requestSettings": + err = unpopulate(val, "RequestSettings", &o.RequestSettings) + delete(rawMsg, key) + case "scaleSettings": + o.ScaleSettings, err = unmarshalOnlineScaleSettingsClassification(val) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobBaseResourceArmPaginatedResult. -func (j JobBaseResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineDeploymentTrackedResourceArmPaginatedResult. 
+func (o OnlineDeploymentTrackedResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", j.NextLink) - populate(objectMap, "value", j.Value) + populate(objectMap, "nextLink", o.NextLink) + populate(objectMap, "value", o.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobBaseResourceArmPaginatedResult. -func (j *JobBaseResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineDeploymentTrackedResourceArmPaginatedResult. +func (o *OnlineDeploymentTrackedResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { case "nextLink": - err = unpopulate(val, "NextLink", &j.NextLink) + err = unpopulate(val, "NextLink", &o.NextLink) delete(rawMsg, key) case "value": - err = unpopulate(val, "Value", &j.Value) + err = unpopulate(val, "Value", &o.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobInput. -func (j JobInput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineEndpoint. +func (o OnlineEndpoint) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", j.Description) - objectMap["jobInputType"] = j.JobInputType + populate(objectMap, "id", o.ID) + populate(objectMap, "identity", o.Identity) + populate(objectMap, "kind", o.Kind) + populate(objectMap, "location", o.Location) + populate(objectMap, "name", o.Name) + populate(objectMap, "properties", o.Properties) + populate(objectMap, "sku", o.SKU) + populate(objectMap, "systemData", o.SystemData) + populate(objectMap, "tags", o.Tags) + populate(objectMap, "type", o.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobInput. -func (j *JobInput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineEndpoint. 
+func (o *OnlineEndpoint) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &j.Description) + case "id": + err = unpopulate(val, "ID", &o.ID) delete(rawMsg, key) - case "jobInputType": - err = unpopulate(val, "JobInputType", &j.JobInputType) + case "identity": + err = unpopulate(val, "Identity", &o.Identity) + delete(rawMsg, key) + case "kind": + err = unpopulate(val, "Kind", &o.Kind) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &o.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &o.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &o.Properties) + delete(rawMsg, key) + case "sku": + err = unpopulate(val, "SKU", &o.SKU) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &o.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &o.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobLimits. -func (j JobLimits) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineEndpointProperties. +func (o OnlineEndpointProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["jobLimitsType"] = j.JobLimitsType - populate(objectMap, "timeout", j.Timeout) + populate(objectMap, "authMode", o.AuthMode) + populate(objectMap, "compute", o.Compute) + populate(objectMap, "description", o.Description) + populate(objectMap, "keys", o.Keys) + populate(objectMap, "mirrorTraffic", o.MirrorTraffic) + populate(objectMap, "properties", o.Properties) + populate(objectMap, "provisioningState", o.ProvisioningState) + populate(objectMap, "publicNetworkAccess", o.PublicNetworkAccess) + populate(objectMap, "scoringUri", o.ScoringURI) + populate(objectMap, "swaggerUri", o.SwaggerURI) + populate(objectMap, "traffic", o.Traffic) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobLimits. -func (j *JobLimits) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineEndpointProperties. 
+func (o *OnlineEndpointProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "jobLimitsType": - err = unpopulate(val, "JobLimitsType", &j.JobLimitsType) + case "authMode": + err = unpopulate(val, "AuthMode", &o.AuthMode) + delete(rawMsg, key) + case "compute": + err = unpopulate(val, "Compute", &o.Compute) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &o.Description) + delete(rawMsg, key) + case "keys": + err = unpopulate(val, "Keys", &o.Keys) + delete(rawMsg, key) + case "mirrorTraffic": + err = unpopulate(val, "MirrorTraffic", &o.MirrorTraffic) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &o.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &o.ProvisioningState) delete(rawMsg, key) - case "timeout": - err = unpopulate(val, "Timeout", &j.Timeout) + case "publicNetworkAccess": + err = unpopulate(val, "PublicNetworkAccess", &o.PublicNetworkAccess) + delete(rawMsg, key) + case "scoringUri": + err = unpopulate(val, "ScoringURI", &o.ScoringURI) + delete(rawMsg, key) + case "swaggerUri": + err = unpopulate(val, "SwaggerURI", &o.SwaggerURI) + delete(rawMsg, key) + case "traffic": + err = unpopulate(val, "Traffic", &o.Traffic) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobOutput. -func (j JobOutput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineEndpointTrackedResourceArmPaginatedResult. +func (o OnlineEndpointTrackedResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", j.Description) - objectMap["jobOutputType"] = j.JobOutputType + populate(objectMap, "nextLink", o.NextLink) + populate(objectMap, "value", o.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobOutput. -func (j *JobOutput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineEndpointTrackedResourceArmPaginatedResult. +func (o *OnlineEndpointTrackedResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &j.Description) + case "nextLink": + err = unpopulate(val, "NextLink", &o.NextLink) delete(rawMsg, key) - case "jobOutputType": - err = unpopulate(val, "JobOutputType", &j.JobOutputType) + case "value": + err = unpopulate(val, "Value", &o.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobResourceConfiguration. 
-func (j JobResourceConfiguration) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineInferenceConfiguration. +func (o OnlineInferenceConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "dockerArgs", j.DockerArgs) - populate(objectMap, "instanceCount", j.InstanceCount) - populate(objectMap, "instanceType", j.InstanceType) - populate(objectMap, "properties", j.Properties) - populate(objectMap, "shmSize", j.ShmSize) + populate(objectMap, "configurations", o.Configurations) + populate(objectMap, "entryScript", o.EntryScript) + populate(objectMap, "livenessRoute", o.LivenessRoute) + populate(objectMap, "readinessRoute", o.ReadinessRoute) + populate(objectMap, "scoringRoute", o.ScoringRoute) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobResourceConfiguration. -func (j *JobResourceConfiguration) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineInferenceConfiguration. +func (o *OnlineInferenceConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "dockerArgs": - err = unpopulate(val, "DockerArgs", &j.DockerArgs) + case "configurations": + err = unpopulate(val, "Configurations", &o.Configurations) delete(rawMsg, key) - case "instanceCount": - err = unpopulate(val, "InstanceCount", &j.InstanceCount) + case "entryScript": + err = unpopulate(val, "EntryScript", &o.EntryScript) delete(rawMsg, key) - case "instanceType": - err = unpopulate(val, "InstanceType", &j.InstanceType) + case "livenessRoute": + err = unpopulate(val, "LivenessRoute", &o.LivenessRoute) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &j.Properties) + case "readinessRoute": + err = unpopulate(val, "ReadinessRoute", &o.ReadinessRoute) delete(rawMsg, key) - case "shmSize": - err = unpopulate(val, "ShmSize", &j.ShmSize) + case "scoringRoute": + err = unpopulate(val, "ScoringRoute", &o.ScoringRoute) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobScheduleAction. -func (j JobScheduleAction) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineRequestSettings. +func (o OnlineRequestSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["actionType"] = ScheduleActionTypeCreateJob - populate(objectMap, "jobDefinition", j.JobDefinition) + populate(objectMap, "maxConcurrentRequestsPerInstance", o.MaxConcurrentRequestsPerInstance) + populate(objectMap, "maxQueueWait", o.MaxQueueWait) + populate(objectMap, "requestTimeout", o.RequestTimeout) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobScheduleAction. -func (j *JobScheduleAction) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineRequestSettings. 
+func (o *OnlineRequestSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "actionType": - err = unpopulate(val, "ActionType", &j.ActionType) + case "maxConcurrentRequestsPerInstance": + err = unpopulate(val, "MaxConcurrentRequestsPerInstance", &o.MaxConcurrentRequestsPerInstance) delete(rawMsg, key) - case "jobDefinition": - j.JobDefinition, err = unmarshalJobBasePropertiesClassification(val) + case "maxQueueWait": + err = unpopulate(val, "MaxQueueWait", &o.MaxQueueWait) + delete(rawMsg, key) + case "requestTimeout": + err = unpopulate(val, "RequestTimeout", &o.RequestTimeout) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type JobService. -func (j JobService) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnlineScaleSettings. +func (o OnlineScaleSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "endpoint", j.Endpoint) - populate(objectMap, "errorMessage", j.ErrorMessage) - populate(objectMap, "jobServiceType", j.JobServiceType) - populate(objectMap, "port", j.Port) - populate(objectMap, "properties", j.Properties) - populate(objectMap, "status", j.Status) + objectMap["scaleType"] = o.ScaleType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type JobService. -func (j *JobService) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineScaleSettings. +func (o *OnlineScaleSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "endpoint": - err = unpopulate(val, "Endpoint", &j.Endpoint) - delete(rawMsg, key) - case "errorMessage": - err = unpopulate(val, "ErrorMessage", &j.ErrorMessage) - delete(rawMsg, key) - case "jobServiceType": - err = unpopulate(val, "JobServiceType", &j.JobServiceType) - delete(rawMsg, key) - case "port": - err = unpopulate(val, "Port", &j.Port) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &j.Properties) - delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &j.Status) + case "scaleType": + err = unpopulate(val, "ScaleType", &o.ScaleType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", j, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Kubernetes. -func (k Kubernetes) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OperationDisplay. 
+func (o OperationDisplay) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "computeLocation", k.ComputeLocation) - objectMap["computeType"] = ComputeTypeKubernetes - populateTimeRFC3339(objectMap, "createdOn", k.CreatedOn) - populate(objectMap, "description", k.Description) - populate(objectMap, "disableLocalAuth", k.DisableLocalAuth) - populate(objectMap, "isAttachedCompute", k.IsAttachedCompute) - populateTimeRFC3339(objectMap, "modifiedOn", k.ModifiedOn) - populate(objectMap, "properties", k.Properties) - populate(objectMap, "provisioningErrors", k.ProvisioningErrors) - populate(objectMap, "provisioningState", k.ProvisioningState) - populate(objectMap, "resourceId", k.ResourceID) + populate(objectMap, "description", o.Description) + populate(objectMap, "operation", o.Operation) + populate(objectMap, "provider", o.Provider) + populate(objectMap, "resource", o.Resource) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Kubernetes. -func (k *Kubernetes) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OperationDisplay. +func (o *OperationDisplay) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "computeLocation": - err = unpopulate(val, "ComputeLocation", &k.ComputeLocation) - delete(rawMsg, key) - case "computeType": - err = unpopulate(val, "ComputeType", &k.ComputeType) - delete(rawMsg, key) - case "createdOn": - err = unpopulateTimeRFC3339(val, "CreatedOn", &k.CreatedOn) - delete(rawMsg, key) case "description": - err = unpopulate(val, "Description", &k.Description) - delete(rawMsg, key) - case "disableLocalAuth": - err = unpopulate(val, "DisableLocalAuth", &k.DisableLocalAuth) - delete(rawMsg, key) - case "isAttachedCompute": - err = unpopulate(val, "IsAttachedCompute", &k.IsAttachedCompute) - delete(rawMsg, key) - case "modifiedOn": - err = unpopulateTimeRFC3339(val, "ModifiedOn", &k.ModifiedOn) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &k.Properties) + err = unpopulate(val, "Description", &o.Description) delete(rawMsg, key) - case "provisioningErrors": - err = unpopulate(val, "ProvisioningErrors", &k.ProvisioningErrors) + case "operation": + err = unpopulate(val, "Operation", &o.Operation) delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &k.ProvisioningState) + case "provider": + err = unpopulate(val, "Provider", &o.Provider) delete(rawMsg, key) - case "resourceId": - err = unpopulate(val, "ResourceID", &k.ResourceID) + case "resource": + err = unpopulate(val, "Resource", &o.Resource) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type KubernetesOnlineDeployment. -func (k KubernetesOnlineDeployment) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OutboundRule. 
+func (o OutboundRule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "appInsightsEnabled", k.AppInsightsEnabled) - populate(objectMap, "codeConfiguration", k.CodeConfiguration) - populate(objectMap, "containerResourceRequirements", k.ContainerResourceRequirements) - populate(objectMap, "description", k.Description) - populate(objectMap, "egressPublicNetworkAccess", k.EgressPublicNetworkAccess) - objectMap["endpointComputeType"] = EndpointComputeTypeKubernetes - populate(objectMap, "environmentId", k.EnvironmentID) - populate(objectMap, "environmentVariables", k.EnvironmentVariables) - populate(objectMap, "instanceType", k.InstanceType) - populate(objectMap, "livenessProbe", k.LivenessProbe) - populate(objectMap, "model", k.Model) - populate(objectMap, "modelMountPath", k.ModelMountPath) - populate(objectMap, "properties", k.Properties) - populate(objectMap, "provisioningState", k.ProvisioningState) - populate(objectMap, "readinessProbe", k.ReadinessProbe) - populate(objectMap, "requestSettings", k.RequestSettings) - populate(objectMap, "scaleSettings", k.ScaleSettings) + populate(objectMap, "category", o.Category) + populate(objectMap, "status", o.Status) + objectMap["type"] = o.Type return json.Marshal(objectMap) } - -// UnmarshalJSON implements the json.Unmarshaller interface for type KubernetesOnlineDeployment. -func (k *KubernetesOnlineDeployment) UnmarshalJSON(data []byte) error { - var rawMsg map[string]json.RawMessage - if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) - } - for key, val := range rawMsg { - var err error - switch key { - case "appInsightsEnabled": - err = unpopulate(val, "AppInsightsEnabled", &k.AppInsightsEnabled) - delete(rawMsg, key) - case "codeConfiguration": - err = unpopulate(val, "CodeConfiguration", &k.CodeConfiguration) - delete(rawMsg, key) - case "containerResourceRequirements": - err = unpopulate(val, "ContainerResourceRequirements", &k.ContainerResourceRequirements) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &k.Description) - delete(rawMsg, key) - case "egressPublicNetworkAccess": - err = unpopulate(val, "EgressPublicNetworkAccess", &k.EgressPublicNetworkAccess) - delete(rawMsg, key) - case "endpointComputeType": - err = unpopulate(val, "EndpointComputeType", &k.EndpointComputeType) - delete(rawMsg, key) - case "environmentId": - err = unpopulate(val, "EnvironmentID", &k.EnvironmentID) - delete(rawMsg, key) - case "environmentVariables": - err = unpopulate(val, "EnvironmentVariables", &k.EnvironmentVariables) - delete(rawMsg, key) - case "instanceType": - err = unpopulate(val, "InstanceType", &k.InstanceType) - delete(rawMsg, key) - case "livenessProbe": - err = unpopulate(val, "LivenessProbe", &k.LivenessProbe) - delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &k.Model) - delete(rawMsg, key) - case "modelMountPath": - err = unpopulate(val, "ModelMountPath", &k.ModelMountPath) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &k.Properties) - delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &k.ProvisioningState) - delete(rawMsg, key) - case "readinessProbe": - err = unpopulate(val, "ReadinessProbe", &k.ReadinessProbe) + +// UnmarshalJSON implements the json.Unmarshaller interface for type OutboundRule. 
+func (o *OutboundRule) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "category": + err = unpopulate(val, "Category", &o.Category) delete(rawMsg, key) - case "requestSettings": - err = unpopulate(val, "RequestSettings", &k.RequestSettings) + case "status": + err = unpopulate(val, "Status", &o.Status) delete(rawMsg, key) - case "scaleSettings": - k.ScaleSettings, err = unmarshalOnlineScaleSettingsClassification(val) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type KubernetesProperties. -func (k KubernetesProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OutboundRuleBasicResource. +func (o OutboundRuleBasicResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "defaultInstanceType", k.DefaultInstanceType) - populate(objectMap, "extensionInstanceReleaseTrain", k.ExtensionInstanceReleaseTrain) - populate(objectMap, "extensionPrincipalId", k.ExtensionPrincipalID) - populate(objectMap, "instanceTypes", k.InstanceTypes) - populate(objectMap, "namespace", k.Namespace) - populate(objectMap, "relayConnectionString", k.RelayConnectionString) - populate(objectMap, "serviceBusConnectionString", k.ServiceBusConnectionString) - populate(objectMap, "vcName", k.VcName) + populate(objectMap, "id", o.ID) + populate(objectMap, "name", o.Name) + populate(objectMap, "properties", o.Properties) + populate(objectMap, "systemData", o.SystemData) + populate(objectMap, "type", o.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type KubernetesProperties. -func (k *KubernetesProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OutboundRuleBasicResource. 
+func (o *OutboundRuleBasicResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "defaultInstanceType": - err = unpopulate(val, "DefaultInstanceType", &k.DefaultInstanceType) - delete(rawMsg, key) - case "extensionInstanceReleaseTrain": - err = unpopulate(val, "ExtensionInstanceReleaseTrain", &k.ExtensionInstanceReleaseTrain) - delete(rawMsg, key) - case "extensionPrincipalId": - err = unpopulate(val, "ExtensionPrincipalID", &k.ExtensionPrincipalID) - delete(rawMsg, key) - case "instanceTypes": - err = unpopulate(val, "InstanceTypes", &k.InstanceTypes) + case "id": + err = unpopulate(val, "ID", &o.ID) delete(rawMsg, key) - case "namespace": - err = unpopulate(val, "Namespace", &k.Namespace) + case "name": + err = unpopulate(val, "Name", &o.Name) delete(rawMsg, key) - case "relayConnectionString": - err = unpopulate(val, "RelayConnectionString", &k.RelayConnectionString) + case "properties": + o.Properties, err = unmarshalOutboundRuleClassification(val) delete(rawMsg, key) - case "serviceBusConnectionString": - err = unpopulate(val, "ServiceBusConnectionString", &k.ServiceBusConnectionString) + case "systemData": + err = unpopulate(val, "SystemData", &o.SystemData) delete(rawMsg, key) - case "vcName": - err = unpopulate(val, "VcName", &k.VcName) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type KubernetesSchema. -func (k KubernetesSchema) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OutboundRuleListResult. +func (o OutboundRuleListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "properties", k.Properties) + populate(objectMap, "nextLink", o.NextLink) + populate(objectMap, "value", o.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type KubernetesSchema. -func (k *KubernetesSchema) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OutboundRuleListResult. +func (o *OutboundRuleListResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "properties": - err = unpopulate(val, "Properties", &k.Properties) + case "nextLink": + err = unpopulate(val, "NextLink", &o.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &o.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", k, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ListAmlUserFeatureResult. -func (l ListAmlUserFeatureResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OutputPathAssetReference. 
+func (o OutputPathAssetReference) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", l.NextLink) - populate(objectMap, "value", l.Value) + populate(objectMap, "jobId", o.JobID) + populate(objectMap, "path", o.Path) + objectMap["referenceType"] = ReferenceTypeOutputPath return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ListAmlUserFeatureResult. -func (l *ListAmlUserFeatureResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OutputPathAssetReference. +func (o *OutputPathAssetReference) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &l.NextLink) + case "jobId": + err = unpopulate(val, "JobID", &o.JobID) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &l.Value) + case "path": + err = unpopulate(val, "Path", &o.Path) + delete(rawMsg, key) + case "referenceType": + err = unpopulate(val, "ReferenceType", &o.ReferenceType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ListNotebookKeysResult. -func (l ListNotebookKeysResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PATAuthTypeWorkspaceConnectionProperties. +func (p PATAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "primaryAccessKey", l.PrimaryAccessKey) - populate(objectMap, "secondaryAccessKey", l.SecondaryAccessKey) + objectMap["authType"] = ConnectionAuthTypePAT + populate(objectMap, "category", p.Category) + populate(objectMap, "credentials", p.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", p.ExpiryTime) + populateAny(objectMap, "metadata", p.Metadata) + populate(objectMap, "target", p.Target) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ListNotebookKeysResult. -func (l *ListNotebookKeysResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PATAuthTypeWorkspaceConnectionProperties. 
+func (p *PATAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "primaryAccessKey": - err = unpopulate(val, "PrimaryAccessKey", &l.PrimaryAccessKey) + case "authType": + err = unpopulate(val, "AuthType", &p.AuthType) delete(rawMsg, key) - case "secondaryAccessKey": - err = unpopulate(val, "SecondaryAccessKey", &l.SecondaryAccessKey) + case "category": + err = unpopulate(val, "Category", &p.Category) + delete(rawMsg, key) + case "credentials": + err = unpopulate(val, "Credentials", &p.Credentials) + delete(rawMsg, key) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &p.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &p.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &p.Target) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ListStorageAccountKeysResult. -func (l ListStorageAccountKeysResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PackageInputPathBase. +func (p PackageInputPathBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "userStorageKey", l.UserStorageKey) + objectMap["inputPathType"] = p.InputPathType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ListStorageAccountKeysResult. -func (l *ListStorageAccountKeysResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PackageInputPathBase. +func (p *PackageInputPathBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "userStorageKey": - err = unpopulate(val, "UserStorageKey", &l.UserStorageKey) + case "inputPathType": + err = unpopulate(val, "InputPathType", &p.InputPathType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ListUsagesResult. -func (l ListUsagesResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PackageInputPathID. +func (p PackageInputPathID) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", l.NextLink) - populate(objectMap, "value", l.Value) + objectMap["inputPathType"] = InputPathTypePathID + populate(objectMap, "resourceId", p.ResourceID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ListUsagesResult. -func (l *ListUsagesResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PackageInputPathID. 
+func (p *PackageInputPathID) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &l.NextLink) + case "inputPathType": + err = unpopulate(val, "InputPathType", &p.InputPathType) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &l.Value) + case "resourceId": + err = unpopulate(val, "ResourceID", &p.ResourceID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ListWorkspaceKeysResult. -func (l ListWorkspaceKeysResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PackageInputPathURL. +func (p PackageInputPathURL) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "appInsightsInstrumentationKey", l.AppInsightsInstrumentationKey) - populate(objectMap, "containerRegistryCredentials", l.ContainerRegistryCredentials) - populate(objectMap, "notebookAccessKeys", l.NotebookAccessKeys) - populate(objectMap, "userStorageKey", l.UserStorageKey) - populate(objectMap, "userStorageResourceId", l.UserStorageResourceID) + objectMap["inputPathType"] = InputPathTypeURL + populate(objectMap, "url", p.URL) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ListWorkspaceKeysResult. -func (l *ListWorkspaceKeysResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PackageInputPathURL. +func (p *PackageInputPathURL) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "appInsightsInstrumentationKey": - err = unpopulate(val, "AppInsightsInstrumentationKey", &l.AppInsightsInstrumentationKey) + case "inputPathType": + err = unpopulate(val, "InputPathType", &p.InputPathType) delete(rawMsg, key) - case "containerRegistryCredentials": - err = unpopulate(val, "ContainerRegistryCredentials", &l.ContainerRegistryCredentials) + case "url": + err = unpopulate(val, "URL", &p.URL) delete(rawMsg, key) - case "notebookAccessKeys": - err = unpopulate(val, "NotebookAccessKeys", &l.NotebookAccessKeys) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PackageInputPathVersion. +func (p PackageInputPathVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["inputPathType"] = InputPathTypePathVersion + populate(objectMap, "resourceName", p.ResourceName) + populate(objectMap, "resourceVersion", p.ResourceVersion) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PackageInputPathVersion. 
+func (p *PackageInputPathVersion) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "inputPathType": + err = unpopulate(val, "InputPathType", &p.InputPathType) delete(rawMsg, key) - case "userStorageKey": - err = unpopulate(val, "UserStorageKey", &l.UserStorageKey) + case "resourceName": + err = unpopulate(val, "ResourceName", &p.ResourceName) delete(rawMsg, key) - case "userStorageResourceId": - err = unpopulate(val, "UserStorageResourceID", &l.UserStorageResourceID) + case "resourceVersion": + err = unpopulate(val, "ResourceVersion", &p.ResourceVersion) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ListWorkspaceQuotas. -func (l ListWorkspaceQuotas) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PackageRequest. +func (p PackageRequest) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", l.NextLink) - populate(objectMap, "value", l.Value) + populate(objectMap, "baseEnvironmentSource", p.BaseEnvironmentSource) + populate(objectMap, "environmentVariables", p.EnvironmentVariables) + populate(objectMap, "inferencingServer", p.InferencingServer) + populate(objectMap, "inputs", p.Inputs) + populate(objectMap, "modelConfiguration", p.ModelConfiguration) + populate(objectMap, "tags", p.Tags) + populate(objectMap, "targetEnvironmentId", p.TargetEnvironmentID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ListWorkspaceQuotas. -func (l *ListWorkspaceQuotas) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PackageRequest. +func (p *PackageRequest) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &l.NextLink) + case "baseEnvironmentSource": + p.BaseEnvironmentSource, err = unmarshalBaseEnvironmentSourceClassification(val) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &l.Value) + case "environmentVariables": + err = unpopulate(val, "EnvironmentVariables", &p.EnvironmentVariables) + delete(rawMsg, key) + case "inferencingServer": + p.InferencingServer, err = unmarshalInferencingServerClassification(val) + delete(rawMsg, key) + case "inputs": + err = unpopulate(val, "Inputs", &p.Inputs) + delete(rawMsg, key) + case "modelConfiguration": + err = unpopulate(val, "ModelConfiguration", &p.ModelConfiguration) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) + delete(rawMsg, key) + case "targetEnvironmentId": + err = unpopulate(val, "TargetEnvironmentID", &p.TargetEnvironmentID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type LiteralJobInput. 
-func (l LiteralJobInput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PackageResponse. +func (p PackageResponse) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", l.Description) - objectMap["jobInputType"] = JobInputTypeLiteral - populate(objectMap, "value", l.Value) + populate(objectMap, "baseEnvironmentSource", p.BaseEnvironmentSource) + populate(objectMap, "buildId", p.BuildID) + populate(objectMap, "buildState", p.BuildState) + populate(objectMap, "environmentVariables", p.EnvironmentVariables) + populate(objectMap, "inferencingServer", p.InferencingServer) + populate(objectMap, "inputs", p.Inputs) + populate(objectMap, "logUrl", p.LogURL) + populate(objectMap, "modelConfiguration", p.ModelConfiguration) + populate(objectMap, "tags", p.Tags) + populate(objectMap, "targetEnvironmentId", p.TargetEnvironmentID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type LiteralJobInput. -func (l *LiteralJobInput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PackageResponse. +func (p *PackageResponse) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &l.Description) + case "baseEnvironmentSource": + p.BaseEnvironmentSource, err = unmarshalBaseEnvironmentSourceClassification(val) + delete(rawMsg, key) + case "buildId": + err = unpopulate(val, "BuildID", &p.BuildID) + delete(rawMsg, key) + case "buildState": + err = unpopulate(val, "BuildState", &p.BuildState) + delete(rawMsg, key) + case "environmentVariables": + err = unpopulate(val, "EnvironmentVariables", &p.EnvironmentVariables) + delete(rawMsg, key) + case "inferencingServer": + p.InferencingServer, err = unmarshalInferencingServerClassification(val) + delete(rawMsg, key) + case "inputs": + err = unpopulate(val, "Inputs", &p.Inputs) delete(rawMsg, key) - case "jobInputType": - err = unpopulate(val, "JobInputType", &l.JobInputType) + case "logUrl": + err = unpopulate(val, "LogURL", &p.LogURL) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &l.Value) + case "modelConfiguration": + err = unpopulate(val, "ModelConfiguration", &p.ModelConfiguration) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) + delete(rawMsg, key) + case "targetEnvironmentId": + err = unpopulate(val, "TargetEnvironmentID", &p.TargetEnvironmentID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", l, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type MLFlowModelJobInput. -func (m MLFlowModelJobInput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PaginatedComputeResourcesList. 
+func (p PaginatedComputeResourcesList) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", m.Description) - objectMap["jobInputType"] = JobInputTypeMlflowModel - populate(objectMap, "mode", m.Mode) - populate(objectMap, "uri", m.URI) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type MLFlowModelJobInput. -func (m *MLFlowModelJobInput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PaginatedComputeResourcesList. +func (p *PaginatedComputeResourcesList) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "jobInputType": - err = unpopulate(val, "JobInputType", &m.JobInputType) - delete(rawMsg, key) - case "mode": - err = unpopulate(val, "Mode", &m.Mode) + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) delete(rawMsg, key) - case "uri": - err = unpopulate(val, "URI", &m.URI) + case "value": + err = unpopulate(val, "Value", &p.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type MLFlowModelJobOutput. -func (m MLFlowModelJobOutput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialBatchDeployment. +func (p PartialBatchDeployment) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", m.Description) - objectMap["jobOutputType"] = JobOutputTypeMlflowModel - populate(objectMap, "mode", m.Mode) - populate(objectMap, "uri", m.URI) + populate(objectMap, "description", p.Description) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type MLFlowModelJobOutput. -func (m *MLFlowModelJobOutput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialBatchDeployment. +func (p *PartialBatchDeployment) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "jobOutputType": - err = unpopulate(val, "JobOutputType", &m.JobOutputType) - delete(rawMsg, key) - case "mode": - err = unpopulate(val, "Mode", &m.Mode) - delete(rawMsg, key) - case "uri": - err = unpopulate(val, "URI", &m.URI) + err = unpopulate(val, "Description", &p.Description) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type MLTableData. 
-func (m MLTableData) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties. +func (p PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["dataType"] = DataTypeMltable - populate(objectMap, "dataUri", m.DataURI) - populate(objectMap, "description", m.Description) - populate(objectMap, "isAnonymous", m.IsAnonymous) - populate(objectMap, "isArchived", m.IsArchived) - populate(objectMap, "properties", m.Properties) - populate(objectMap, "referencedUris", m.ReferencedUris) - populate(objectMap, "tags", m.Tags) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "tags", p.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type MLTableData. -func (m *MLTableData) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties. +func (p *PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "dataType": - err = unpopulate(val, "DataType", &m.DataType) - delete(rawMsg, key) - case "dataUri": - err = unpopulate(val, "DataURI", &m.DataURI) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "isAnonymous": - err = unpopulate(val, "IsAnonymous", &m.IsAnonymous) - delete(rawMsg, key) - case "isArchived": - err = unpopulate(val, "IsArchived", &m.IsArchived) - delete(rawMsg, key) case "properties": - err = unpopulate(val, "Properties", &m.Properties) - delete(rawMsg, key) - case "referencedUris": - err = unpopulate(val, "ReferencedUris", &m.ReferencedUris) + err = unpopulate(val, "Properties", &p.Properties) delete(rawMsg, key) case "tags": - err = unpopulate(val, "Tags", &m.Tags) + err = unpopulate(val, "Tags", &p.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type MLTableJobInput. -func (m MLTableJobInput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialJobBase. +func (p PartialJobBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", m.Description) - objectMap["jobInputType"] = JobInputTypeMltable - populate(objectMap, "mode", m.Mode) - populate(objectMap, "uri", m.URI) + populate(objectMap, "notificationSetting", p.NotificationSetting) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type MLTableJobInput. -func (m *MLTableJobInput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialJobBase. 
+func (p *PartialJobBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "jobInputType": - err = unpopulate(val, "JobInputType", &m.JobInputType) - delete(rawMsg, key) - case "mode": - err = unpopulate(val, "Mode", &m.Mode) - delete(rawMsg, key) - case "uri": - err = unpopulate(val, "URI", &m.URI) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &p.NotificationSetting) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type MLTableJobOutput. -func (m MLTableJobOutput) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialJobBasePartialResource. +func (p PartialJobBasePartialResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", m.Description) - objectMap["jobOutputType"] = JobOutputTypeMltable - populate(objectMap, "mode", m.Mode) - populate(objectMap, "uri", m.URI) + populate(objectMap, "properties", p.Properties) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type MLTableJobOutput. -func (m *MLTableJobOutput) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialJobBasePartialResource. +func (p *PartialJobBasePartialResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "jobOutputType": - err = unpopulate(val, "JobOutputType", &m.JobOutputType) - delete(rawMsg, key) - case "mode": - err = unpopulate(val, "Mode", &m.Mode) - delete(rawMsg, key) - case "uri": - err = unpopulate(val, "URI", &m.URI) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ManagedIdentity. -func (m ManagedIdentity) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialManagedServiceIdentity. +func (p PartialManagedServiceIdentity) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "clientId", m.ClientID) - objectMap["identityType"] = IdentityConfigurationTypeManaged - populate(objectMap, "objectId", m.ObjectID) - populate(objectMap, "resourceId", m.ResourceID) + populate(objectMap, "type", p.Type) + populate(objectMap, "userAssignedIdentities", p.UserAssignedIdentities) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedIdentity. 
-func (m *ManagedIdentity) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialManagedServiceIdentity. +func (p *PartialManagedServiceIdentity) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "clientId": - err = unpopulate(val, "ClientID", &m.ClientID) - delete(rawMsg, key) - case "identityType": - err = unpopulate(val, "IdentityType", &m.IdentityType) - delete(rawMsg, key) - case "objectId": - err = unpopulate(val, "ObjectID", &m.ObjectID) + case "type": + err = unpopulate(val, "Type", &p.Type) delete(rawMsg, key) - case "resourceId": - err = unpopulate(val, "ResourceID", &m.ResourceID) + case "userAssignedIdentities": + err = unpopulate(val, "UserAssignedIdentities", &p.UserAssignedIdentities) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ManagedIdentityAuthTypeWorkspaceConnectionProperties. -func (m ManagedIdentityAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialMinimalTrackedResource. +func (p PartialMinimalTrackedResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["authType"] = ConnectionAuthTypeManagedIdentity - populate(objectMap, "category", m.Category) - populate(objectMap, "credentials", m.Credentials) - populate(objectMap, "target", m.Target) - populate(objectMap, "value", m.Value) - populate(objectMap, "valueFormat", m.ValueFormat) + populate(objectMap, "tags", p.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedIdentityAuthTypeWorkspaceConnectionProperties. -func (m *ManagedIdentityAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialMinimalTrackedResource. +func (p *PartialMinimalTrackedResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "authType": - err = unpopulate(val, "AuthType", &m.AuthType) - delete(rawMsg, key) - case "category": - err = unpopulate(val, "Category", &m.Category) - delete(rawMsg, key) - case "credentials": - err = unpopulate(val, "Credentials", &m.Credentials) - delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &m.Target) - delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &m.Value) - delete(rawMsg, key) - case "valueFormat": - err = unpopulate(val, "ValueFormat", &m.ValueFormat) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) delete(rawMsg, key) - } - if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) - } - } - return nil -} - -// MarshalJSON implements the json.Marshaller interface for type ManagedOnlineDeployment. 
-func (m ManagedOnlineDeployment) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]any) - populate(objectMap, "appInsightsEnabled", m.AppInsightsEnabled) - populate(objectMap, "codeConfiguration", m.CodeConfiguration) - populate(objectMap, "description", m.Description) - populate(objectMap, "egressPublicNetworkAccess", m.EgressPublicNetworkAccess) - objectMap["endpointComputeType"] = EndpointComputeTypeManaged - populate(objectMap, "environmentId", m.EnvironmentID) - populate(objectMap, "environmentVariables", m.EnvironmentVariables) - populate(objectMap, "instanceType", m.InstanceType) - populate(objectMap, "livenessProbe", m.LivenessProbe) - populate(objectMap, "model", m.Model) - populate(objectMap, "modelMountPath", m.ModelMountPath) - populate(objectMap, "properties", m.Properties) - populate(objectMap, "provisioningState", m.ProvisioningState) - populate(objectMap, "readinessProbe", m.ReadinessProbe) - populate(objectMap, "requestSettings", m.RequestSettings) - populate(objectMap, "scaleSettings", m.ScaleSettings) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PartialMinimalTrackedResourceWithIdentity. +func (p PartialMinimalTrackedResourceWithIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "identity", p.Identity) + populate(objectMap, "tags", p.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedOnlineDeployment. -func (m *ManagedOnlineDeployment) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialMinimalTrackedResourceWithIdentity. 
+func (p *PartialMinimalTrackedResourceWithIdentity) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "appInsightsEnabled": - err = unpopulate(val, "AppInsightsEnabled", &m.AppInsightsEnabled) - delete(rawMsg, key) - case "codeConfiguration": - err = unpopulate(val, "CodeConfiguration", &m.CodeConfiguration) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "egressPublicNetworkAccess": - err = unpopulate(val, "EgressPublicNetworkAccess", &m.EgressPublicNetworkAccess) - delete(rawMsg, key) - case "endpointComputeType": - err = unpopulate(val, "EndpointComputeType", &m.EndpointComputeType) - delete(rawMsg, key) - case "environmentId": - err = unpopulate(val, "EnvironmentID", &m.EnvironmentID) - delete(rawMsg, key) - case "environmentVariables": - err = unpopulate(val, "EnvironmentVariables", &m.EnvironmentVariables) - delete(rawMsg, key) - case "instanceType": - err = unpopulate(val, "InstanceType", &m.InstanceType) - delete(rawMsg, key) - case "livenessProbe": - err = unpopulate(val, "LivenessProbe", &m.LivenessProbe) - delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &m.Model) - delete(rawMsg, key) - case "modelMountPath": - err = unpopulate(val, "ModelMountPath", &m.ModelMountPath) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &m.Properties) - delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &m.ProvisioningState) - delete(rawMsg, key) - case "readinessProbe": - err = unpopulate(val, "ReadinessProbe", &m.ReadinessProbe) - delete(rawMsg, key) - case "requestSettings": - err = unpopulate(val, "RequestSettings", &m.RequestSettings) + case "identity": + err = unpopulate(val, "Identity", &p.Identity) delete(rawMsg, key) - case "scaleSettings": - m.ScaleSettings, err = unmarshalOnlineScaleSettingsClassification(val) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ManagedServiceIdentity. -func (m ManagedServiceIdentity) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialMinimalTrackedResourceWithSKU. +func (p PartialMinimalTrackedResourceWithSKU) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "principalId", m.PrincipalID) - populate(objectMap, "tenantId", m.TenantID) - populate(objectMap, "type", m.Type) - populate(objectMap, "userAssignedIdentities", m.UserAssignedIdentities) + populate(objectMap, "sku", p.SKU) + populate(objectMap, "tags", p.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedServiceIdentity. -func (m *ManagedServiceIdentity) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialMinimalTrackedResourceWithSKU. 
+func (p *PartialMinimalTrackedResourceWithSKU) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "principalId": - err = unpopulate(val, "PrincipalID", &m.PrincipalID) - delete(rawMsg, key) - case "tenantId": - err = unpopulate(val, "TenantID", &m.TenantID) - delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &m.Type) + case "sku": + err = unpopulate(val, "SKU", &p.SKU) delete(rawMsg, key) - case "userAssignedIdentities": - err = unpopulate(val, "UserAssignedIdentities", &m.UserAssignedIdentities) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type MedianStoppingPolicy. -func (m MedianStoppingPolicy) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialNotificationSetting. +func (p PartialNotificationSetting) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "delayEvaluation", m.DelayEvaluation) - populate(objectMap, "evaluationInterval", m.EvaluationInterval) - objectMap["policyType"] = EarlyTerminationPolicyTypeMedianStopping + populate(objectMap, "webhooks", p.Webhooks) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type MedianStoppingPolicy. -func (m *MedianStoppingPolicy) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialNotificationSetting. +func (p *PartialNotificationSetting) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "delayEvaluation": - err = unpopulate(val, "DelayEvaluation", &m.DelayEvaluation) - delete(rawMsg, key) - case "evaluationInterval": - err = unpopulate(val, "EvaluationInterval", &m.EvaluationInterval) - delete(rawMsg, key) - case "policyType": - err = unpopulate(val, "PolicyType", &m.PolicyType) + case "webhooks": + p.Webhooks, err = unmarshalWebhookClassificationMap(val) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ModelContainer. -func (m ModelContainer) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialRegistryPartialTrackedResource. 
+func (p PartialRegistryPartialTrackedResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", m.ID) - populate(objectMap, "name", m.Name) - populate(objectMap, "properties", m.Properties) - populate(objectMap, "systemData", m.SystemData) - populate(objectMap, "type", m.Type) + populate(objectMap, "identity", p.Identity) + populate(objectMap, "sku", p.SKU) + populate(objectMap, "tags", p.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ModelContainer. -func (m *ModelContainer) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialRegistryPartialTrackedResource. +func (p *PartialRegistryPartialTrackedResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &m.ID) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &m.Name) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &m.Properties) + case "identity": + err = unpopulate(val, "Identity", &p.Identity) delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &m.SystemData) + case "sku": + err = unpopulate(val, "SKU", &p.SKU) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &m.Type) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ModelContainerProperties. -func (m ModelContainerProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PartialSKU. +func (p PartialSKU) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", m.Description) - populate(objectMap, "isArchived", m.IsArchived) - populate(objectMap, "latestVersion", m.LatestVersion) - populate(objectMap, "nextVersion", m.NextVersion) - populate(objectMap, "properties", m.Properties) - populate(objectMap, "tags", m.Tags) + populate(objectMap, "capacity", p.Capacity) + populate(objectMap, "family", p.Family) + populate(objectMap, "name", p.Name) + populate(objectMap, "size", p.Size) + populate(objectMap, "tier", p.Tier) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ModelContainerProperties. -func (m *ModelContainerProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PartialSKU. 
+func (p *PartialSKU) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "isArchived": - err = unpopulate(val, "IsArchived", &m.IsArchived) + case "capacity": + err = unpopulate(val, "Capacity", &p.Capacity) delete(rawMsg, key) - case "latestVersion": - err = unpopulate(val, "LatestVersion", &m.LatestVersion) + case "family": + err = unpopulate(val, "Family", &p.Family) delete(rawMsg, key) - case "nextVersion": - err = unpopulate(val, "NextVersion", &m.NextVersion) + case "name": + err = unpopulate(val, "Name", &p.Name) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &m.Properties) + case "size": + err = unpopulate(val, "Size", &p.Size) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &m.Tags) + case "tier": + err = unpopulate(val, "Tier", &p.Tier) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ModelContainerResourceArmPaginatedResult. -func (m ModelContainerResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Password. +func (p Password) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", m.NextLink) - populate(objectMap, "value", m.Value) + populate(objectMap, "name", p.Name) + populate(objectMap, "value", p.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ModelContainerResourceArmPaginatedResult. -func (m *ModelContainerResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Password. +func (p *Password) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &m.NextLink) + case "name": + err = unpopulate(val, "Name", &p.Name) delete(rawMsg, key) case "value": - err = unpopulate(val, "Value", &m.Value) + err = unpopulate(val, "Value", &p.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ModelVersion. -func (m ModelVersion) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PendingUploadCredentialDto. 
+func (p PendingUploadCredentialDto) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", m.ID) - populate(objectMap, "name", m.Name) - populate(objectMap, "properties", m.Properties) - populate(objectMap, "systemData", m.SystemData) - populate(objectMap, "type", m.Type) + objectMap["credentialType"] = p.CredentialType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ModelVersion. -func (m *ModelVersion) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PendingUploadCredentialDto. +func (p *PendingUploadCredentialDto) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &m.ID) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &m.Name) + case "credentialType": + err = unpopulate(val, "CredentialType", &p.CredentialType) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &m.Properties) - delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &m.SystemData) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PendingUploadRequestDto. +func (p PendingUploadRequestDto) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "pendingUploadId", p.PendingUploadID) + populate(objectMap, "pendingUploadType", p.PendingUploadType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PendingUploadRequestDto. +func (p *PendingUploadRequestDto) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "pendingUploadId": + err = unpopulate(val, "PendingUploadID", &p.PendingUploadID) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &m.Type) + case "pendingUploadType": + err = unpopulate(val, "PendingUploadType", &p.PendingUploadType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ModelVersionProperties. -func (m ModelVersionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PendingUploadResponseDto. 
+func (p PendingUploadResponseDto) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", m.Description) - populate(objectMap, "flavors", m.Flavors) - populate(objectMap, "isAnonymous", m.IsAnonymous) - populate(objectMap, "isArchived", m.IsArchived) - populate(objectMap, "jobName", m.JobName) - populate(objectMap, "modelType", m.ModelType) - populate(objectMap, "modelUri", m.ModelURI) - populate(objectMap, "properties", m.Properties) - populate(objectMap, "tags", m.Tags) + populate(objectMap, "blobReferenceForConsumption", p.BlobReferenceForConsumption) + populate(objectMap, "pendingUploadId", p.PendingUploadID) + populate(objectMap, "pendingUploadType", p.PendingUploadType) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ModelVersionProperties. -func (m *ModelVersionProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PendingUploadResponseDto. +func (p *PendingUploadResponseDto) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &m.Description) - delete(rawMsg, key) - case "flavors": - err = unpopulate(val, "Flavors", &m.Flavors) + case "blobReferenceForConsumption": + err = unpopulate(val, "BlobReferenceForConsumption", &p.BlobReferenceForConsumption) delete(rawMsg, key) - case "isAnonymous": - err = unpopulate(val, "IsAnonymous", &m.IsAnonymous) - delete(rawMsg, key) - case "isArchived": - err = unpopulate(val, "IsArchived", &m.IsArchived) - delete(rawMsg, key) - case "jobName": - err = unpopulate(val, "JobName", &m.JobName) - delete(rawMsg, key) - case "modelType": - err = unpopulate(val, "ModelType", &m.ModelType) - delete(rawMsg, key) - case "modelUri": - err = unpopulate(val, "ModelURI", &m.ModelURI) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &m.Properties) + case "pendingUploadId": + err = unpopulate(val, "PendingUploadID", &p.PendingUploadID) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &m.Tags) + case "pendingUploadType": + err = unpopulate(val, "PendingUploadType", &p.PendingUploadType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ModelVersionResourceArmPaginatedResult. -func (m ModelVersionResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PersonalComputeInstanceSettings. +func (p PersonalComputeInstanceSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", m.NextLink) - populate(objectMap, "value", m.Value) + populate(objectMap, "assignedUser", p.AssignedUser) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ModelVersionResourceArmPaginatedResult. -func (m *ModelVersionResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PersonalComputeInstanceSettings. 
+func (p *PersonalComputeInstanceSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &m.NextLink) - delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &m.Value) + case "assignedUser": + err = unpopulate(val, "AssignedUser", &p.AssignedUser) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Mpi. -func (m Mpi) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PipelineJob. +func (p PipelineJob) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["distributionType"] = DistributionTypeMpi - populate(objectMap, "processCountPerInstance", m.ProcessCountPerInstance) + populate(objectMap, "componentId", p.ComponentID) + populate(objectMap, "computeId", p.ComputeID) + populate(objectMap, "description", p.Description) + populate(objectMap, "displayName", p.DisplayName) + populate(objectMap, "experimentName", p.ExperimentName) + populate(objectMap, "identity", p.Identity) + populate(objectMap, "inputs", p.Inputs) + populate(objectMap, "isArchived", p.IsArchived) + objectMap["jobType"] = JobTypePipeline + populate(objectMap, "jobs", p.Jobs) + populate(objectMap, "notificationSetting", p.NotificationSetting) + populate(objectMap, "outputs", p.Outputs) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "secretsConfiguration", p.SecretsConfiguration) + populate(objectMap, "services", p.Services) + populateAny(objectMap, "settings", p.Settings) + populate(objectMap, "sourceJobId", p.SourceJobID) + populate(objectMap, "status", p.Status) + populate(objectMap, "tags", p.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Mpi. -func (m *Mpi) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PipelineJob. 
+func (p *PipelineJob) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "distributionType": - err = unpopulate(val, "DistributionType", &m.DistributionType) + case "componentId": + err = unpopulate(val, "ComponentID", &p.ComponentID) delete(rawMsg, key) - case "processCountPerInstance": - err = unpopulate(val, "ProcessCountPerInstance", &m.ProcessCountPerInstance) + case "computeId": + err = unpopulate(val, "ComputeID", &p.ComputeID) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &p.Description) + delete(rawMsg, key) + case "displayName": + err = unpopulate(val, "DisplayName", &p.DisplayName) + delete(rawMsg, key) + case "experimentName": + err = unpopulate(val, "ExperimentName", &p.ExperimentName) + delete(rawMsg, key) + case "identity": + p.Identity, err = unmarshalIdentityConfigurationClassification(val) + delete(rawMsg, key) + case "inputs": + p.Inputs, err = unmarshalJobInputClassificationMap(val) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &p.IsArchived) + delete(rawMsg, key) + case "jobType": + err = unpopulate(val, "JobType", &p.JobType) + delete(rawMsg, key) + case "jobs": + err = unpopulate(val, "Jobs", &p.Jobs) + delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &p.NotificationSetting) + delete(rawMsg, key) + case "outputs": + p.Outputs, err = unmarshalJobOutputClassificationMap(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &p.SecretsConfiguration) + delete(rawMsg, key) + case "services": + err = unpopulate(val, "Services", &p.Services) + delete(rawMsg, key) + case "settings": + err = unpopulate(val, "Settings", &p.Settings) + delete(rawMsg, key) + case "sourceJobId": + err = unpopulate(val, "SourceJobID", &p.SourceJobID) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &p.Status) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", m, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NCrossValidations. -func (n NCrossValidations) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PredictionDriftMetricThresholdBase. +func (p PredictionDriftMetricThresholdBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = n.Mode + objectMap["dataType"] = p.DataType + populate(objectMap, "threshold", p.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NCrossValidations. -func (n *NCrossValidations) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PredictionDriftMetricThresholdBase. 
+func (p *PredictionDriftMetricThresholdBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "mode": - err = unpopulate(val, "Mode", &n.Mode) + case "dataType": + err = unpopulate(val, "DataType", &p.DataType) + delete(rawMsg, key) + case "threshold": + err = unpopulate(val, "Threshold", &p.Threshold) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NlpVertical. -func (n NlpVertical) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PredictionDriftMonitoringSignal. +func (p PredictionDriftMonitoringSignal) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "featurizationSettings", n.FeaturizationSettings) - populate(objectMap, "limitSettings", n.LimitSettings) - populate(objectMap, "validationData", n.ValidationData) + populate(objectMap, "metricThresholds", p.MetricThresholds) + populate(objectMap, "mode", p.Mode) + populate(objectMap, "modelType", p.ModelType) + populate(objectMap, "productionData", p.ProductionData) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "referenceData", p.ReferenceData) + objectMap["signalType"] = MonitoringSignalTypePredictionDrift return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NlpVertical. -func (n *NlpVertical) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PredictionDriftMonitoringSignal. +func (p *PredictionDriftMonitoringSignal) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "featurizationSettings": - err = unpopulate(val, "FeaturizationSettings", &n.FeaturizationSettings) + case "metricThresholds": + p.MetricThresholds, err = unmarshalPredictionDriftMetricThresholdBaseClassificationArray(val) delete(rawMsg, key) - case "limitSettings": - err = unpopulate(val, "LimitSettings", &n.LimitSettings) + case "mode": + err = unpopulate(val, "Mode", &p.Mode) delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &n.ValidationData) + case "modelType": + err = unpopulate(val, "ModelType", &p.ModelType) + delete(rawMsg, key) + case "productionData": + p.ProductionData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "referenceData": + p.ReferenceData, err = unmarshalMonitoringInputDataBaseClassification(val) + delete(rawMsg, key) + case "signalType": + err = unpopulate(val, "SignalType", &p.SignalType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NlpVerticalFeaturizationSettings. 
-func (n NlpVerticalFeaturizationSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpoint. +func (p PrivateEndpoint) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "datasetLanguage", n.DatasetLanguage) + populate(objectMap, "id", p.ID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NlpVerticalFeaturizationSettings. -func (n *NlpVerticalFeaturizationSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpoint. +func (p *PrivateEndpoint) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "datasetLanguage": - err = unpopulate(val, "DatasetLanguage", &n.DatasetLanguage) + case "id": + err = unpopulate(val, "ID", &p.ID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NlpVerticalLimitSettings. -func (n NlpVerticalLimitSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnection. +func (p PrivateEndpointConnection) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "maxConcurrentTrials", n.MaxConcurrentTrials) - populate(objectMap, "maxTrials", n.MaxTrials) - populate(objectMap, "timeout", n.Timeout) + populate(objectMap, "id", p.ID) + populate(objectMap, "identity", p.Identity) + populate(objectMap, "location", p.Location) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "sku", p.SKU) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "tags", p.Tags) + populate(objectMap, "type", p.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NlpVerticalLimitSettings. -func (n *NlpVerticalLimitSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnection. 
+func (p *PrivateEndpointConnection) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "maxConcurrentTrials": - err = unpopulate(val, "MaxConcurrentTrials", &n.MaxConcurrentTrials) + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &p.Identity) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &p.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) delete(rawMsg, key) - case "maxTrials": - err = unpopulate(val, "MaxTrials", &n.MaxTrials) + case "sku": + err = unpopulate(val, "SKU", &p.SKU) delete(rawMsg, key) - case "timeout": - err = unpopulate(val, "Timeout", &n.Timeout) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NodeStateCounts. -func (n NodeStateCounts) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnectionListResult. +func (p PrivateEndpointConnectionListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "idleNodeCount", n.IdleNodeCount) - populate(objectMap, "leavingNodeCount", n.LeavingNodeCount) - populate(objectMap, "preemptedNodeCount", n.PreemptedNodeCount) - populate(objectMap, "preparingNodeCount", n.PreparingNodeCount) - populate(objectMap, "runningNodeCount", n.RunningNodeCount) - populate(objectMap, "unusableNodeCount", n.UnusableNodeCount) + populate(objectMap, "value", p.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NodeStateCounts. -func (n *NodeStateCounts) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnectionListResult. 
+func (p *PrivateEndpointConnectionListResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "idleNodeCount": - err = unpopulate(val, "IdleNodeCount", &n.IdleNodeCount) - delete(rawMsg, key) - case "leavingNodeCount": - err = unpopulate(val, "LeavingNodeCount", &n.LeavingNodeCount) - delete(rawMsg, key) - case "preemptedNodeCount": - err = unpopulate(val, "PreemptedNodeCount", &n.PreemptedNodeCount) - delete(rawMsg, key) - case "preparingNodeCount": - err = unpopulate(val, "PreparingNodeCount", &n.PreparingNodeCount) - delete(rawMsg, key) - case "runningNodeCount": - err = unpopulate(val, "RunningNodeCount", &n.RunningNodeCount) - delete(rawMsg, key) - case "unusableNodeCount": - err = unpopulate(val, "UnusableNodeCount", &n.UnusableNodeCount) + case "value": + err = unpopulate(val, "Value", &p.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NoneAuthTypeWorkspaceConnectionProperties. -func (n NoneAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnectionProperties. +func (p PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["authType"] = ConnectionAuthTypeNone - populate(objectMap, "category", n.Category) - populate(objectMap, "target", n.Target) - populate(objectMap, "value", n.Value) - populate(objectMap, "valueFormat", n.ValueFormat) + populate(objectMap, "privateEndpoint", p.PrivateEndpoint) + populate(objectMap, "privateLinkServiceConnectionState", p.PrivateLinkServiceConnectionState) + populate(objectMap, "provisioningState", p.ProvisioningState) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NoneAuthTypeWorkspaceConnectionProperties. -func (n *NoneAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnectionProperties. 
+func (p *PrivateEndpointConnectionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "authType": - err = unpopulate(val, "AuthType", &n.AuthType) - delete(rawMsg, key) - case "category": - err = unpopulate(val, "Category", &n.Category) - delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &n.Target) + case "privateEndpoint": + err = unpopulate(val, "PrivateEndpoint", &p.PrivateEndpoint) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &n.Value) + case "privateLinkServiceConnectionState": + err = unpopulate(val, "PrivateLinkServiceConnectionState", &p.PrivateLinkServiceConnectionState) delete(rawMsg, key) - case "valueFormat": - err = unpopulate(val, "ValueFormat", &n.ValueFormat) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &p.ProvisioningState) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NoneDatastoreCredentials. -func (n NoneDatastoreCredentials) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointDestination. +func (p PrivateEndpointDestination) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["credentialsType"] = CredentialsTypeNone + populate(objectMap, "serviceResourceId", p.ServiceResourceID) + populate(objectMap, "sparkEnabled", p.SparkEnabled) + populate(objectMap, "sparkStatus", p.SparkStatus) + populate(objectMap, "subresourceTarget", p.SubresourceTarget) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NoneDatastoreCredentials. -func (n *NoneDatastoreCredentials) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointDestination. +func (p *PrivateEndpointDestination) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "credentialsType": - err = unpopulate(val, "CredentialsType", &n.CredentialsType) + case "serviceResourceId": + err = unpopulate(val, "ServiceResourceID", &p.ServiceResourceID) + delete(rawMsg, key) + case "sparkEnabled": + err = unpopulate(val, "SparkEnabled", &p.SparkEnabled) + delete(rawMsg, key) + case "sparkStatus": + err = unpopulate(val, "SparkStatus", &p.SparkStatus) + delete(rawMsg, key) + case "subresourceTarget": + err = unpopulate(val, "SubresourceTarget", &p.SubresourceTarget) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NotebookAccessTokenResult. -func (n NotebookAccessTokenResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointOutboundRule. 
+func (p PrivateEndpointOutboundRule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "accessToken", n.AccessToken) - populate(objectMap, "expiresIn", n.ExpiresIn) - populate(objectMap, "hostName", n.HostName) - populate(objectMap, "notebookResourceId", n.NotebookResourceID) - populate(objectMap, "publicDns", n.PublicDNS) - populate(objectMap, "refreshToken", n.RefreshToken) - populate(objectMap, "scope", n.Scope) - populate(objectMap, "tokenType", n.TokenType) + populate(objectMap, "category", p.Category) + populate(objectMap, "destination", p.Destination) + populate(objectMap, "status", p.Status) + objectMap["type"] = RuleTypePrivateEndpoint return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NotebookAccessTokenResult. -func (n *NotebookAccessTokenResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointOutboundRule. +func (p *PrivateEndpointOutboundRule) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "accessToken": - err = unpopulate(val, "AccessToken", &n.AccessToken) - delete(rawMsg, key) - case "expiresIn": - err = unpopulate(val, "ExpiresIn", &n.ExpiresIn) - delete(rawMsg, key) - case "hostName": - err = unpopulate(val, "HostName", &n.HostName) - delete(rawMsg, key) - case "notebookResourceId": - err = unpopulate(val, "NotebookResourceID", &n.NotebookResourceID) - delete(rawMsg, key) - case "publicDns": - err = unpopulate(val, "PublicDNS", &n.PublicDNS) + case "category": + err = unpopulate(val, "Category", &p.Category) delete(rawMsg, key) - case "refreshToken": - err = unpopulate(val, "RefreshToken", &n.RefreshToken) + case "destination": + err = unpopulate(val, "Destination", &p.Destination) delete(rawMsg, key) - case "scope": - err = unpopulate(val, "Scope", &n.Scope) + case "status": + err = unpopulate(val, "Status", &p.Status) delete(rawMsg, key) - case "tokenType": - err = unpopulate(val, "TokenType", &n.TokenType) + case "type": + err = unpopulate(val, "Type", &p.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NotebookPreparationError. -func (n NotebookPreparationError) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointResource. +func (p PrivateEndpointResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "errorMessage", n.ErrorMessage) - populate(objectMap, "statusCode", n.StatusCode) + populate(objectMap, "id", p.ID) + populate(objectMap, "subnetArmId", p.SubnetArmID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NotebookPreparationError. -func (n *NotebookPreparationError) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointResource. 
+func (p *PrivateEndpointResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "errorMessage": - err = unpopulate(val, "ErrorMessage", &n.ErrorMessage) + case "id": + err = unpopulate(val, "ID", &p.ID) delete(rawMsg, key) - case "statusCode": - err = unpopulate(val, "StatusCode", &n.StatusCode) + case "subnetArmId": + err = unpopulate(val, "SubnetArmID", &p.SubnetArmID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type NotebookResourceInfo. -func (n NotebookResourceInfo) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResource. +func (p PrivateLinkResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "fqdn", n.Fqdn) - populate(objectMap, "notebookPreparationError", n.NotebookPreparationError) - populate(objectMap, "resourceId", n.ResourceID) + populate(objectMap, "id", p.ID) + populate(objectMap, "identity", p.Identity) + populate(objectMap, "location", p.Location) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "sku", p.SKU) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "tags", p.Tags) + populate(objectMap, "type", p.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type NotebookResourceInfo. -func (n *NotebookResourceInfo) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResource. +func (p *PrivateLinkResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "fqdn": - err = unpopulate(val, "Fqdn", &n.Fqdn) + case "id": + err = unpopulate(val, "ID", &p.ID) delete(rawMsg, key) - case "notebookPreparationError": - err = unpopulate(val, "NotebookPreparationError", &n.NotebookPreparationError) + case "identity": + err = unpopulate(val, "Identity", &p.Identity) delete(rawMsg, key) - case "resourceId": - err = unpopulate(val, "ResourceID", &n.ResourceID) + case "location": + err = unpopulate(val, "Location", &p.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "sku": + err = unpopulate(val, "SKU", &p.SKU) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &p.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", n, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Objective. 
-func (o Objective) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResourceListResult. +func (p PrivateLinkResourceListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "goal", o.Goal) - populate(objectMap, "primaryMetric", o.PrimaryMetric) + populate(objectMap, "value", p.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Objective. -func (o *Objective) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResourceListResult. +func (p *PrivateLinkResourceListResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "goal": - err = unpopulate(val, "Goal", &o.Goal) - delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &o.PrimaryMetric) + case "value": + err = unpopulate(val, "Value", &p.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineDeployment. -func (o OnlineDeployment) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResourceProperties. +func (p PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", o.ID) - populate(objectMap, "identity", o.Identity) - populate(objectMap, "kind", o.Kind) - populate(objectMap, "location", o.Location) - populate(objectMap, "name", o.Name) - populate(objectMap, "properties", o.Properties) - populate(objectMap, "sku", o.SKU) - populate(objectMap, "systemData", o.SystemData) - populate(objectMap, "tags", o.Tags) - populate(objectMap, "type", o.Type) + populate(objectMap, "groupId", p.GroupID) + populate(objectMap, "requiredMembers", p.RequiredMembers) + populate(objectMap, "requiredZoneNames", p.RequiredZoneNames) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineDeployment. -func (o *OnlineDeployment) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResourceProperties. 
+func (p *PrivateLinkResourceProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &o.ID) - delete(rawMsg, key) - case "identity": - err = unpopulate(val, "Identity", &o.Identity) - delete(rawMsg, key) - case "kind": - err = unpopulate(val, "Kind", &o.Kind) - delete(rawMsg, key) - case "location": - err = unpopulate(val, "Location", &o.Location) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &o.Name) - delete(rawMsg, key) - case "properties": - o.Properties, err = unmarshalOnlineDeploymentPropertiesClassification(val) - delete(rawMsg, key) - case "sku": - err = unpopulate(val, "SKU", &o.SKU) - delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &o.SystemData) + case "groupId": + err = unpopulate(val, "GroupID", &p.GroupID) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &o.Tags) + case "requiredMembers": + err = unpopulate(val, "RequiredMembers", &p.RequiredMembers) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &o.Type) + case "requiredZoneNames": + err = unpopulate(val, "RequiredZoneNames", &p.RequiredZoneNames) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineDeploymentProperties. -func (o OnlineDeploymentProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkServiceConnectionState. +func (p PrivateLinkServiceConnectionState) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "appInsightsEnabled", o.AppInsightsEnabled) - populate(objectMap, "codeConfiguration", o.CodeConfiguration) - populate(objectMap, "description", o.Description) - populate(objectMap, "egressPublicNetworkAccess", o.EgressPublicNetworkAccess) - objectMap["endpointComputeType"] = o.EndpointComputeType - populate(objectMap, "environmentId", o.EnvironmentID) - populate(objectMap, "environmentVariables", o.EnvironmentVariables) - populate(objectMap, "instanceType", o.InstanceType) - populate(objectMap, "livenessProbe", o.LivenessProbe) - populate(objectMap, "model", o.Model) - populate(objectMap, "modelMountPath", o.ModelMountPath) - populate(objectMap, "properties", o.Properties) - populate(objectMap, "provisioningState", o.ProvisioningState) - populate(objectMap, "readinessProbe", o.ReadinessProbe) - populate(objectMap, "requestSettings", o.RequestSettings) - populate(objectMap, "scaleSettings", o.ScaleSettings) + populate(objectMap, "actionsRequired", p.ActionsRequired) + populate(objectMap, "description", p.Description) + populate(objectMap, "status", p.Status) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineDeploymentProperties. -func (o *OnlineDeploymentProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkServiceConnectionState. 
+func (p *PrivateLinkServiceConnectionState) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "appInsightsEnabled": - err = unpopulate(val, "AppInsightsEnabled", &o.AppInsightsEnabled) - delete(rawMsg, key) - case "codeConfiguration": - err = unpopulate(val, "CodeConfiguration", &o.CodeConfiguration) + case "actionsRequired": + err = unpopulate(val, "ActionsRequired", &p.ActionsRequired) delete(rawMsg, key) case "description": - err = unpopulate(val, "Description", &o.Description) - delete(rawMsg, key) - case "egressPublicNetworkAccess": - err = unpopulate(val, "EgressPublicNetworkAccess", &o.EgressPublicNetworkAccess) - delete(rawMsg, key) - case "endpointComputeType": - err = unpopulate(val, "EndpointComputeType", &o.EndpointComputeType) - delete(rawMsg, key) - case "environmentId": - err = unpopulate(val, "EnvironmentID", &o.EnvironmentID) - delete(rawMsg, key) - case "environmentVariables": - err = unpopulate(val, "EnvironmentVariables", &o.EnvironmentVariables) - delete(rawMsg, key) - case "instanceType": - err = unpopulate(val, "InstanceType", &o.InstanceType) - delete(rawMsg, key) - case "livenessProbe": - err = unpopulate(val, "LivenessProbe", &o.LivenessProbe) - delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &o.Model) - delete(rawMsg, key) - case "modelMountPath": - err = unpopulate(val, "ModelMountPath", &o.ModelMountPath) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &o.Properties) - delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &o.ProvisioningState) - delete(rawMsg, key) - case "readinessProbe": - err = unpopulate(val, "ReadinessProbe", &o.ReadinessProbe) - delete(rawMsg, key) - case "requestSettings": - err = unpopulate(val, "RequestSettings", &o.RequestSettings) + err = unpopulate(val, "Description", &p.Description) delete(rawMsg, key) - case "scaleSettings": - o.ScaleSettings, err = unmarshalOnlineScaleSettingsClassification(val) + case "status": + err = unpopulate(val, "Status", &p.Status) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineDeploymentTrackedResourceArmPaginatedResult. -func (o OnlineDeploymentTrackedResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ProbeSettings. +func (p ProbeSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", o.NextLink) - populate(objectMap, "value", o.Value) + populate(objectMap, "failureThreshold", p.FailureThreshold) + populate(objectMap, "initialDelay", p.InitialDelay) + populate(objectMap, "period", p.Period) + populate(objectMap, "successThreshold", p.SuccessThreshold) + populate(objectMap, "timeout", p.Timeout) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineDeploymentTrackedResourceArmPaginatedResult. -func (o *OnlineDeploymentTrackedResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ProbeSettings. 
+func (p *ProbeSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &o.NextLink) + case "failureThreshold": + err = unpopulate(val, "FailureThreshold", &p.FailureThreshold) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &o.Value) + case "initialDelay": + err = unpopulate(val, "InitialDelay", &p.InitialDelay) + delete(rawMsg, key) + case "period": + err = unpopulate(val, "Period", &p.Period) + delete(rawMsg, key) + case "successThreshold": + err = unpopulate(val, "SuccessThreshold", &p.SuccessThreshold) + delete(rawMsg, key) + case "timeout": + err = unpopulate(val, "Timeout", &p.Timeout) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineEndpoint. -func (o OnlineEndpoint) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ProgressMetrics. +func (p ProgressMetrics) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", o.ID) - populate(objectMap, "identity", o.Identity) - populate(objectMap, "kind", o.Kind) - populate(objectMap, "location", o.Location) - populate(objectMap, "name", o.Name) - populate(objectMap, "properties", o.Properties) - populate(objectMap, "sku", o.SKU) - populate(objectMap, "systemData", o.SystemData) - populate(objectMap, "tags", o.Tags) - populate(objectMap, "type", o.Type) + populate(objectMap, "completedDatapointCount", p.CompletedDatapointCount) + populateTimeRFC3339(objectMap, "incrementalDataLastRefreshDateTime", p.IncrementalDataLastRefreshDateTime) + populate(objectMap, "skippedDatapointCount", p.SkippedDatapointCount) + populate(objectMap, "totalDatapointCount", p.TotalDatapointCount) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineEndpoint. -func (o *OnlineEndpoint) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ProgressMetrics. 
+func (p *ProgressMetrics) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &o.ID) - delete(rawMsg, key) - case "identity": - err = unpopulate(val, "Identity", &o.Identity) - delete(rawMsg, key) - case "kind": - err = unpopulate(val, "Kind", &o.Kind) - delete(rawMsg, key) - case "location": - err = unpopulate(val, "Location", &o.Location) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &o.Name) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &o.Properties) + case "completedDatapointCount": + err = unpopulate(val, "CompletedDatapointCount", &p.CompletedDatapointCount) delete(rawMsg, key) - case "sku": - err = unpopulate(val, "SKU", &o.SKU) - delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &o.SystemData) + case "incrementalDataLastRefreshDateTime": + err = unpopulateTimeRFC3339(val, "IncrementalDataLastRefreshDateTime", &p.IncrementalDataLastRefreshDateTime) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &o.Tags) + case "skippedDatapointCount": + err = unpopulate(val, "SkippedDatapointCount", &p.SkippedDatapointCount) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &o.Type) + case "totalDatapointCount": + err = unpopulate(val, "TotalDatapointCount", &p.TotalDatapointCount) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineEndpointProperties. -func (o OnlineEndpointProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PyTorch. +func (p PyTorch) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "authMode", o.AuthMode) - populate(objectMap, "compute", o.Compute) - populate(objectMap, "description", o.Description) - populate(objectMap, "keys", o.Keys) - populate(objectMap, "properties", o.Properties) - populate(objectMap, "provisioningState", o.ProvisioningState) - populate(objectMap, "publicNetworkAccess", o.PublicNetworkAccess) - populate(objectMap, "scoringUri", o.ScoringURI) - populate(objectMap, "swaggerUri", o.SwaggerURI) - populate(objectMap, "traffic", o.Traffic) + objectMap["distributionType"] = DistributionTypePyTorch + populate(objectMap, "processCountPerInstance", p.ProcessCountPerInstance) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineEndpointProperties. -func (o *OnlineEndpointProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PyTorch. 
+func (p *PyTorch) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "authMode": - err = unpopulate(val, "AuthMode", &o.AuthMode) - delete(rawMsg, key) - case "compute": - err = unpopulate(val, "Compute", &o.Compute) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &o.Description) - delete(rawMsg, key) - case "keys": - err = unpopulate(val, "Keys", &o.Keys) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &o.Properties) - delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &o.ProvisioningState) - delete(rawMsg, key) - case "publicNetworkAccess": - err = unpopulate(val, "PublicNetworkAccess", &o.PublicNetworkAccess) - delete(rawMsg, key) - case "scoringUri": - err = unpopulate(val, "ScoringURI", &o.ScoringURI) - delete(rawMsg, key) - case "swaggerUri": - err = unpopulate(val, "SwaggerURI", &o.SwaggerURI) + case "distributionType": + err = unpopulate(val, "DistributionType", &p.DistributionType) delete(rawMsg, key) - case "traffic": - err = unpopulate(val, "Traffic", &o.Traffic) + case "processCountPerInstance": + err = unpopulate(val, "ProcessCountPerInstance", &p.ProcessCountPerInstance) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineEndpointTrackedResourceArmPaginatedResult. -func (o OnlineEndpointTrackedResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type QueueSettings. +func (q QueueSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", o.NextLink) - populate(objectMap, "value", o.Value) + populate(objectMap, "jobTier", q.JobTier) + populate(objectMap, "priority", q.Priority) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineEndpointTrackedResourceArmPaginatedResult. -func (o *OnlineEndpointTrackedResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type QueueSettings. +func (q *QueueSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", q, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &o.NextLink) + case "jobTier": + err = unpopulate(val, "JobTier", &q.JobTier) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &o.Value) + case "priority": + err = unpopulate(val, "Priority", &q.Priority) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", q, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineRequestSettings. -func (o OnlineRequestSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type QuotaBaseProperties. 
+func (q QuotaBaseProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "maxConcurrentRequestsPerInstance", o.MaxConcurrentRequestsPerInstance) - populate(objectMap, "maxQueueWait", o.MaxQueueWait) - populate(objectMap, "requestTimeout", o.RequestTimeout) + populate(objectMap, "id", q.ID) + populate(objectMap, "limit", q.Limit) + populate(objectMap, "type", q.Type) + populate(objectMap, "unit", q.Unit) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineRequestSettings. -func (o *OnlineRequestSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type QuotaBaseProperties. +func (q *QuotaBaseProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", q, err) } for key, val := range rawMsg { var err error switch key { - case "maxConcurrentRequestsPerInstance": - err = unpopulate(val, "MaxConcurrentRequestsPerInstance", &o.MaxConcurrentRequestsPerInstance) + case "id": + err = unpopulate(val, "ID", &q.ID) delete(rawMsg, key) - case "maxQueueWait": - err = unpopulate(val, "MaxQueueWait", &o.MaxQueueWait) + case "limit": + err = unpopulate(val, "Limit", &q.Limit) delete(rawMsg, key) - case "requestTimeout": - err = unpopulate(val, "RequestTimeout", &o.RequestTimeout) + case "type": + err = unpopulate(val, "Type", &q.Type) + delete(rawMsg, key) + case "unit": + err = unpopulate(val, "Unit", &q.Unit) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", q, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OnlineScaleSettings. -func (o OnlineScaleSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type QuotaUpdateParameters. +func (q QuotaUpdateParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["scaleType"] = o.ScaleType + populate(objectMap, "location", q.Location) + populate(objectMap, "value", q.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OnlineScaleSettings. -func (o *OnlineScaleSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type QuotaUpdateParameters. +func (q *QuotaUpdateParameters) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", q, err) } for key, val := range rawMsg { var err error switch key { - case "scaleType": - err = unpopulate(val, "ScaleType", &o.ScaleType) + case "location": + err = unpopulate(val, "Location", &q.Location) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &q.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", q, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type OutputPathAssetReference. -func (o OutputPathAssetReference) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RandomSamplingAlgorithm. 
+func (r RandomSamplingAlgorithm) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "jobId", o.JobID) - populate(objectMap, "path", o.Path) - objectMap["referenceType"] = ReferenceTypeOutputPath + populate(objectMap, "logbase", r.Logbase) + populate(objectMap, "rule", r.Rule) + objectMap["samplingAlgorithmType"] = SamplingAlgorithmTypeRandom + populate(objectMap, "seed", r.Seed) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type OutputPathAssetReference. -func (o *OutputPathAssetReference) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RandomSamplingAlgorithm. +func (r *RandomSamplingAlgorithm) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "jobId": - err = unpopulate(val, "JobID", &o.JobID) + case "logbase": + err = unpopulate(val, "Logbase", &r.Logbase) delete(rawMsg, key) - case "path": - err = unpopulate(val, "Path", &o.Path) + case "rule": + err = unpopulate(val, "Rule", &r.Rule) delete(rawMsg, key) - case "referenceType": - err = unpopulate(val, "ReferenceType", &o.ReferenceType) + case "samplingAlgorithmType": + err = unpopulate(val, "SamplingAlgorithmType", &r.SamplingAlgorithmType) + delete(rawMsg, key) + case "seed": + err = unpopulate(val, "Seed", &r.Seed) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", o, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PATAuthTypeWorkspaceConnectionProperties. -func (p PATAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Ray. +func (r Ray) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["authType"] = ConnectionAuthTypePAT - populate(objectMap, "category", p.Category) - populate(objectMap, "credentials", p.Credentials) - populate(objectMap, "target", p.Target) - populate(objectMap, "value", p.Value) - populate(objectMap, "valueFormat", p.ValueFormat) + populate(objectMap, "address", r.Address) + populate(objectMap, "dashboardPort", r.DashboardPort) + objectMap["distributionType"] = DistributionTypeRay + populate(objectMap, "headNodeAdditionalArgs", r.HeadNodeAdditionalArgs) + populate(objectMap, "includeDashboard", r.IncludeDashboard) + populate(objectMap, "port", r.Port) + populate(objectMap, "workerNodeAdditionalArgs", r.WorkerNodeAdditionalArgs) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PATAuthTypeWorkspaceConnectionProperties. -func (p *PATAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Ray. 
+func (r *Ray) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "authType": - err = unpopulate(val, "AuthType", &p.AuthType) + case "address": + err = unpopulate(val, "Address", &r.Address) delete(rawMsg, key) - case "category": - err = unpopulate(val, "Category", &p.Category) + case "dashboardPort": + err = unpopulate(val, "DashboardPort", &r.DashboardPort) delete(rawMsg, key) - case "credentials": - err = unpopulate(val, "Credentials", &p.Credentials) + case "distributionType": + err = unpopulate(val, "DistributionType", &r.DistributionType) delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &p.Target) + case "headNodeAdditionalArgs": + err = unpopulate(val, "HeadNodeAdditionalArgs", &r.HeadNodeAdditionalArgs) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &p.Value) + case "includeDashboard": + err = unpopulate(val, "IncludeDashboard", &r.IncludeDashboard) delete(rawMsg, key) - case "valueFormat": - err = unpopulate(val, "ValueFormat", &p.ValueFormat) + case "port": + err = unpopulate(val, "Port", &r.Port) + delete(rawMsg, key) + case "workerNodeAdditionalArgs": + err = unpopulate(val, "WorkerNodeAdditionalArgs", &r.WorkerNodeAdditionalArgs) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PaginatedComputeResourcesList. -func (p PaginatedComputeResourcesList) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Recurrence. +func (r Recurrence) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", p.NextLink) - populate(objectMap, "value", p.Value) + populate(objectMap, "frequency", r.Frequency) + populate(objectMap, "interval", r.Interval) + populate(objectMap, "schedule", r.Schedule) + populate(objectMap, "startTime", r.StartTime) + populate(objectMap, "timeZone", r.TimeZone) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PaginatedComputeResourcesList. -func (p *PaginatedComputeResourcesList) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Recurrence. 
+func (r *Recurrence) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &p.NextLink) + case "frequency": + err = unpopulate(val, "Frequency", &r.Frequency) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &p.Value) + case "interval": + err = unpopulate(val, "Interval", &r.Interval) + delete(rawMsg, key) + case "schedule": + err = unpopulate(val, "Schedule", &r.Schedule) + delete(rawMsg, key) + case "startTime": + err = unpopulate(val, "StartTime", &r.StartTime) + delete(rawMsg, key) + case "timeZone": + err = unpopulate(val, "TimeZone", &r.TimeZone) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialBatchDeployment. -func (p PartialBatchDeployment) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RecurrenceSchedule. +func (r RecurrenceSchedule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", p.Description) + populate(objectMap, "hours", r.Hours) + populate(objectMap, "minutes", r.Minutes) + populate(objectMap, "monthDays", r.MonthDays) + populate(objectMap, "weekDays", r.WeekDays) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialBatchDeployment. -func (p *PartialBatchDeployment) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RecurrenceSchedule. +func (r *RecurrenceSchedule) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &p.Description) + case "hours": + err = unpopulate(val, "Hours", &r.Hours) + delete(rawMsg, key) + case "minutes": + err = unpopulate(val, "Minutes", &r.Minutes) + delete(rawMsg, key) + case "monthDays": + err = unpopulate(val, "MonthDays", &r.MonthDays) + delete(rawMsg, key) + case "weekDays": + err = unpopulate(val, "WeekDays", &r.WeekDays) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties. -func (p PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RecurrenceTrigger. 
+func (r RecurrenceTrigger) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "properties", p.Properties) - populate(objectMap, "tags", p.Tags) + populate(objectMap, "endTime", r.EndTime) + populate(objectMap, "frequency", r.Frequency) + populate(objectMap, "interval", r.Interval) + populate(objectMap, "schedule", r.Schedule) + populate(objectMap, "startTime", r.StartTime) + populate(objectMap, "timeZone", r.TimeZone) + objectMap["triggerType"] = TriggerTypeRecurrence return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties. -func (p *PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RecurrenceTrigger. +func (r *RecurrenceTrigger) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "properties": - err = unpopulate(val, "Properties", &p.Properties) + case "endTime": + err = unpopulate(val, "EndTime", &r.EndTime) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &p.Tags) + case "frequency": + err = unpopulate(val, "Frequency", &r.Frequency) + delete(rawMsg, key) + case "interval": + err = unpopulate(val, "Interval", &r.Interval) + delete(rawMsg, key) + case "schedule": + err = unpopulate(val, "Schedule", &r.Schedule) + delete(rawMsg, key) + case "startTime": + err = unpopulate(val, "StartTime", &r.StartTime) + delete(rawMsg, key) + case "timeZone": + err = unpopulate(val, "TimeZone", &r.TimeZone) + delete(rawMsg, key) + case "triggerType": + err = unpopulate(val, "TriggerType", &r.TriggerType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialManagedServiceIdentity. -func (p PartialManagedServiceIdentity) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegenerateEndpointKeysRequest. +func (r RegenerateEndpointKeysRequest) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "type", p.Type) - populate(objectMap, "userAssignedIdentities", p.UserAssignedIdentities) + populate(objectMap, "keyType", r.KeyType) + populate(objectMap, "keyValue", r.KeyValue) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialManagedServiceIdentity. -func (p *PartialManagedServiceIdentity) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegenerateEndpointKeysRequest. 
+func (r *RegenerateEndpointKeysRequest) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error - switch key { - case "type": - err = unpopulate(val, "Type", &p.Type) + switch key { + case "keyType": + err = unpopulate(val, "KeyType", &r.KeyType) delete(rawMsg, key) - case "userAssignedIdentities": - err = unpopulate(val, "UserAssignedIdentities", &p.UserAssignedIdentities) + case "keyValue": + err = unpopulate(val, "KeyValue", &r.KeyValue) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialMinimalTrackedResource. -func (p PartialMinimalTrackedResource) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Registry. +func (r Registry) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "tags", p.Tags) + populate(objectMap, "id", r.ID) + populate(objectMap, "identity", r.Identity) + populate(objectMap, "kind", r.Kind) + populate(objectMap, "location", r.Location) + populate(objectMap, "name", r.Name) + populate(objectMap, "properties", r.Properties) + populate(objectMap, "sku", r.SKU) + populate(objectMap, "systemData", r.SystemData) + populate(objectMap, "tags", r.Tags) + populate(objectMap, "type", r.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialMinimalTrackedResource. -func (p *PartialMinimalTrackedResource) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Registry. +func (r *Registry) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { + case "id": + err = unpopulate(val, "ID", &r.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &r.Identity) + delete(rawMsg, key) + case "kind": + err = unpopulate(val, "Kind", &r.Kind) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &r.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &r.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &r.Properties) + delete(rawMsg, key) + case "sku": + err = unpopulate(val, "SKU", &r.SKU) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &r.SystemData) + delete(rawMsg, key) case "tags": - err = unpopulate(val, "Tags", &p.Tags) + err = unpopulate(val, "Tags", &r.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &r.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialMinimalTrackedResourceWithIdentity. -func (p PartialMinimalTrackedResourceWithIdentity) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryListCredentialsResult. 
+func (r RegistryListCredentialsResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "identity", p.Identity) - populate(objectMap, "tags", p.Tags) + populate(objectMap, "location", r.Location) + populate(objectMap, "passwords", r.Passwords) + populate(objectMap, "username", r.Username) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialMinimalTrackedResourceWithIdentity. -func (p *PartialMinimalTrackedResourceWithIdentity) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryListCredentialsResult. +func (r *RegistryListCredentialsResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "identity": - err = unpopulate(val, "Identity", &p.Identity) + case "location": + err = unpopulate(val, "Location", &r.Location) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &p.Tags) + case "passwords": + err = unpopulate(val, "Passwords", &r.Passwords) + delete(rawMsg, key) + case "username": + err = unpopulate(val, "Username", &r.Username) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialMinimalTrackedResourceWithSKU. -func (p PartialMinimalTrackedResourceWithSKU) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryPartialManagedServiceIdentity. +func (r RegistryPartialManagedServiceIdentity) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "sku", p.SKU) - populate(objectMap, "tags", p.Tags) + populate(objectMap, "principalId", r.PrincipalID) + populate(objectMap, "tenantId", r.TenantID) + populate(objectMap, "type", r.Type) + populate(objectMap, "userAssignedIdentities", r.UserAssignedIdentities) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialMinimalTrackedResourceWithSKU. -func (p *PartialMinimalTrackedResourceWithSKU) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryPartialManagedServiceIdentity. 
+func (r *RegistryPartialManagedServiceIdentity) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "sku": - err = unpopulate(val, "SKU", &p.SKU) + case "principalId": + err = unpopulate(val, "PrincipalID", &r.PrincipalID) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &p.Tags) + case "tenantId": + err = unpopulate(val, "TenantID", &r.TenantID) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &r.Type) + delete(rawMsg, key) + case "userAssignedIdentities": + err = unpopulate(val, "UserAssignedIdentities", &r.UserAssignedIdentities) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PartialSKU. -func (p PartialSKU) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryPrivateEndpointConnection. +func (r RegistryPrivateEndpointConnection) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "capacity", p.Capacity) - populate(objectMap, "family", p.Family) - populate(objectMap, "name", p.Name) - populate(objectMap, "size", p.Size) - populate(objectMap, "tier", p.Tier) + populate(objectMap, "id", r.ID) + populate(objectMap, "location", r.Location) + populate(objectMap, "properties", r.Properties) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PartialSKU. -func (p *PartialSKU) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryPrivateEndpointConnection. +func (r *RegistryPrivateEndpointConnection) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "capacity": - err = unpopulate(val, "Capacity", &p.Capacity) - delete(rawMsg, key) - case "family": - err = unpopulate(val, "Family", &p.Family) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &p.Name) + case "id": + err = unpopulate(val, "ID", &r.ID) delete(rawMsg, key) - case "size": - err = unpopulate(val, "Size", &p.Size) + case "location": + err = unpopulate(val, "Location", &r.Location) delete(rawMsg, key) - case "tier": - err = unpopulate(val, "Tier", &p.Tier) + case "properties": + err = unpopulate(val, "Properties", &r.Properties) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Password. -func (p Password) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryPrivateEndpointConnectionProperties. 
+func (r RegistryPrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "name", p.Name) - populate(objectMap, "value", p.Value) + populate(objectMap, "groupIds", r.GroupIDs) + populate(objectMap, "privateEndpoint", r.PrivateEndpoint) + populate(objectMap, "provisioningState", r.ProvisioningState) + populate(objectMap, "registryPrivateLinkServiceConnectionState", r.RegistryPrivateLinkServiceConnectionState) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Password. -func (p *Password) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryPrivateEndpointConnectionProperties. +func (r *RegistryPrivateEndpointConnectionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "name": - err = unpopulate(val, "Name", &p.Name) + case "groupIds": + err = unpopulate(val, "GroupIDs", &r.GroupIDs) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &p.Value) + case "privateEndpoint": + err = unpopulate(val, "PrivateEndpoint", &r.PrivateEndpoint) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &r.ProvisioningState) + delete(rawMsg, key) + case "registryPrivateLinkServiceConnectionState": + err = unpopulate(val, "RegistryPrivateLinkServiceConnectionState", &r.RegistryPrivateLinkServiceConnectionState) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PersonalComputeInstanceSettings. -func (p PersonalComputeInstanceSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryPrivateLinkServiceConnectionState. +func (r RegistryPrivateLinkServiceConnectionState) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "assignedUser", p.AssignedUser) + populate(objectMap, "actionsRequired", r.ActionsRequired) + populate(objectMap, "description", r.Description) + populate(objectMap, "status", r.Status) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PersonalComputeInstanceSettings. -func (p *PersonalComputeInstanceSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryPrivateLinkServiceConnectionState. 
+func (r *RegistryPrivateLinkServiceConnectionState) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "assignedUser": - err = unpopulate(val, "AssignedUser", &p.AssignedUser) + case "actionsRequired": + err = unpopulate(val, "ActionsRequired", &r.ActionsRequired) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &r.Description) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &r.Status) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PipelineJob. -func (p PipelineJob) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryProperties. +func (r RegistryProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "componentId", p.ComponentID) - populate(objectMap, "computeId", p.ComputeID) - populate(objectMap, "description", p.Description) - populate(objectMap, "displayName", p.DisplayName) - populate(objectMap, "experimentName", p.ExperimentName) - populate(objectMap, "identity", p.Identity) - populate(objectMap, "inputs", p.Inputs) - populate(objectMap, "isArchived", p.IsArchived) - objectMap["jobType"] = JobTypePipeline - populate(objectMap, "jobs", p.Jobs) - populate(objectMap, "outputs", p.Outputs) - populate(objectMap, "properties", p.Properties) - populate(objectMap, "services", p.Services) - populateAny(objectMap, "settings", p.Settings) - populate(objectMap, "sourceJobId", p.SourceJobID) - populate(objectMap, "status", p.Status) - populate(objectMap, "tags", p.Tags) + populate(objectMap, "discoveryUrl", r.DiscoveryURL) + populate(objectMap, "intellectualPropertyPublisher", r.IntellectualPropertyPublisher) + populate(objectMap, "managedResourceGroup", r.ManagedResourceGroup) + populate(objectMap, "mlFlowRegistryUri", r.MlFlowRegistryURI) + populate(objectMap, "publicNetworkAccess", r.PublicNetworkAccess) + populate(objectMap, "regionDetails", r.RegionDetails) + populate(objectMap, "registryPrivateEndpointConnections", r.RegistryPrivateEndpointConnections) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PipelineJob. -func (p *PipelineJob) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryProperties. 
+func (r *RegistryProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "componentId": - err = unpopulate(val, "ComponentID", &p.ComponentID) - delete(rawMsg, key) - case "computeId": - err = unpopulate(val, "ComputeID", &p.ComputeID) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &p.Description) - delete(rawMsg, key) - case "displayName": - err = unpopulate(val, "DisplayName", &p.DisplayName) - delete(rawMsg, key) - case "experimentName": - err = unpopulate(val, "ExperimentName", &p.ExperimentName) - delete(rawMsg, key) - case "identity": - p.Identity, err = unmarshalIdentityConfigurationClassification(val) - delete(rawMsg, key) - case "inputs": - p.Inputs, err = unmarshalJobInputClassificationMap(val) - delete(rawMsg, key) - case "isArchived": - err = unpopulate(val, "IsArchived", &p.IsArchived) - delete(rawMsg, key) - case "jobType": - err = unpopulate(val, "JobType", &p.JobType) - delete(rawMsg, key) - case "jobs": - err = unpopulate(val, "Jobs", &p.Jobs) - delete(rawMsg, key) - case "outputs": - p.Outputs, err = unmarshalJobOutputClassificationMap(val) + case "discoveryUrl": + err = unpopulate(val, "DiscoveryURL", &r.DiscoveryURL) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &p.Properties) + case "intellectualPropertyPublisher": + err = unpopulate(val, "IntellectualPropertyPublisher", &r.IntellectualPropertyPublisher) delete(rawMsg, key) - case "services": - err = unpopulate(val, "Services", &p.Services) + case "managedResourceGroup": + err = unpopulate(val, "ManagedResourceGroup", &r.ManagedResourceGroup) delete(rawMsg, key) - case "settings": - err = unpopulate(val, "Settings", &p.Settings) + case "mlFlowRegistryUri": + err = unpopulate(val, "MlFlowRegistryURI", &r.MlFlowRegistryURI) delete(rawMsg, key) - case "sourceJobId": - err = unpopulate(val, "SourceJobID", &p.SourceJobID) + case "publicNetworkAccess": + err = unpopulate(val, "PublicNetworkAccess", &r.PublicNetworkAccess) delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &p.Status) + case "regionDetails": + err = unpopulate(val, "RegionDetails", &r.RegionDetails) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &p.Tags) + case "registryPrivateEndpointConnections": + err = unpopulate(val, "RegistryPrivateEndpointConnections", &r.RegistryPrivateEndpointConnections) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateEndpoint. -func (p PrivateEndpoint) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryRegionArmDetails. +func (r RegistryRegionArmDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", p.ID) - populate(objectMap, "subnetArmId", p.SubnetArmID) + populate(objectMap, "acrDetails", r.AcrDetails) + populate(objectMap, "location", r.Location) + populate(objectMap, "storageAccountDetails", r.StorageAccountDetails) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpoint. 
-func (p *PrivateEndpoint) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryRegionArmDetails. +func (r *RegistryRegionArmDetails) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &p.ID) + case "acrDetails": + err = unpopulate(val, "AcrDetails", &r.AcrDetails) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &r.Location) delete(rawMsg, key) - case "subnetArmId": - err = unpopulate(val, "SubnetArmID", &p.SubnetArmID) + case "storageAccountDetails": + err = unpopulate(val, "StorageAccountDetails", &r.StorageAccountDetails) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnection. -func (p PrivateEndpointConnection) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegistryTrackedResourceArmPaginatedResult. +func (r RegistryTrackedResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", p.ID) - populate(objectMap, "identity", p.Identity) - populate(objectMap, "location", p.Location) - populate(objectMap, "name", p.Name) - populate(objectMap, "properties", p.Properties) - populate(objectMap, "sku", p.SKU) - populate(objectMap, "systemData", p.SystemData) - populate(objectMap, "tags", p.Tags) - populate(objectMap, "type", p.Type) + populate(objectMap, "nextLink", r.NextLink) + populate(objectMap, "value", r.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnection. -func (p *PrivateEndpointConnection) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryTrackedResourceArmPaginatedResult. 
+func (r *RegistryTrackedResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &p.ID) - delete(rawMsg, key) - case "identity": - err = unpopulate(val, "Identity", &p.Identity) - delete(rawMsg, key) - case "location": - err = unpopulate(val, "Location", &p.Location) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &p.Name) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &p.Properties) - delete(rawMsg, key) - case "sku": - err = unpopulate(val, "SKU", &p.SKU) - delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &p.SystemData) - delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &p.Tags) + case "nextLink": + err = unpopulate(val, "NextLink", &r.NextLink) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &p.Type) + case "value": + err = unpopulate(val, "Value", &r.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnectionListResult. -func (p PrivateEndpointConnectionListResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Regression. +func (r Regression) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "value", p.Value) + populate(objectMap, "cvSplitColumnNames", r.CvSplitColumnNames) + populate(objectMap, "featurizationSettings", r.FeaturizationSettings) + populate(objectMap, "fixedParameters", r.FixedParameters) + populate(objectMap, "limitSettings", r.LimitSettings) + populate(objectMap, "logVerbosity", r.LogVerbosity) + populate(objectMap, "nCrossValidations", r.NCrossValidations) + populate(objectMap, "primaryMetric", r.PrimaryMetric) + populate(objectMap, "searchSpace", r.SearchSpace) + populate(objectMap, "sweepSettings", r.SweepSettings) + populate(objectMap, "targetColumnName", r.TargetColumnName) + objectMap["taskType"] = TaskTypeRegression + populate(objectMap, "testData", r.TestData) + populate(objectMap, "testDataSize", r.TestDataSize) + populate(objectMap, "trainingData", r.TrainingData) + populate(objectMap, "trainingSettings", r.TrainingSettings) + populate(objectMap, "validationData", r.ValidationData) + populate(objectMap, "validationDataSize", r.ValidationDataSize) + populate(objectMap, "weightColumnName", r.WeightColumnName) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnectionListResult. -func (p *PrivateEndpointConnectionListResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Regression. 
+func (r *Regression) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "value": - err = unpopulate(val, "Value", &p.Value) + case "cvSplitColumnNames": + err = unpopulate(val, "CvSplitColumnNames", &r.CvSplitColumnNames) + delete(rawMsg, key) + case "featurizationSettings": + err = unpopulate(val, "FeaturizationSettings", &r.FeaturizationSettings) + delete(rawMsg, key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &r.FixedParameters) + delete(rawMsg, key) + case "limitSettings": + err = unpopulate(val, "LimitSettings", &r.LimitSettings) + delete(rawMsg, key) + case "logVerbosity": + err = unpopulate(val, "LogVerbosity", &r.LogVerbosity) + delete(rawMsg, key) + case "nCrossValidations": + r.NCrossValidations, err = unmarshalNCrossValidationsClassification(val) + delete(rawMsg, key) + case "primaryMetric": + err = unpopulate(val, "PrimaryMetric", &r.PrimaryMetric) + delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &r.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &r.SweepSettings) + delete(rawMsg, key) + case "targetColumnName": + err = unpopulate(val, "TargetColumnName", &r.TargetColumnName) + delete(rawMsg, key) + case "taskType": + err = unpopulate(val, "TaskType", &r.TaskType) + delete(rawMsg, key) + case "testData": + err = unpopulate(val, "TestData", &r.TestData) + delete(rawMsg, key) + case "testDataSize": + err = unpopulate(val, "TestDataSize", &r.TestDataSize) + delete(rawMsg, key) + case "trainingData": + err = unpopulate(val, "TrainingData", &r.TrainingData) + delete(rawMsg, key) + case "trainingSettings": + err = unpopulate(val, "TrainingSettings", &r.TrainingSettings) + delete(rawMsg, key) + case "validationData": + err = unpopulate(val, "ValidationData", &r.ValidationData) + delete(rawMsg, key) + case "validationDataSize": + err = unpopulate(val, "ValidationDataSize", &r.ValidationDataSize) + delete(rawMsg, key) + case "weightColumnName": + err = unpopulate(val, "WeightColumnName", &r.WeightColumnName) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnectionProperties. -func (p PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegressionModelPerformanceMetricThreshold. +func (r RegressionModelPerformanceMetricThreshold) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "privateEndpoint", p.PrivateEndpoint) - populate(objectMap, "privateLinkServiceConnectionState", p.PrivateLinkServiceConnectionState) - populate(objectMap, "provisioningState", p.ProvisioningState) + populate(objectMap, "metric", r.Metric) + objectMap["modelType"] = MonitoringModelTypeRegression + populate(objectMap, "threshold", r.Threshold) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnectionProperties. 
-func (p *PrivateEndpointConnectionProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegressionModelPerformanceMetricThreshold. +func (r *RegressionModelPerformanceMetricThreshold) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "privateEndpoint": - err = unpopulate(val, "PrivateEndpoint", &p.PrivateEndpoint) + case "metric": + err = unpopulate(val, "Metric", &r.Metric) delete(rawMsg, key) - case "privateLinkServiceConnectionState": - err = unpopulate(val, "PrivateLinkServiceConnectionState", &p.PrivateLinkServiceConnectionState) + case "modelType": + err = unpopulate(val, "ModelType", &r.ModelType) delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &p.ProvisioningState) + case "threshold": + err = unpopulate(val, "Threshold", &r.Threshold) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResource. -func (p PrivateLinkResource) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RegressionTrainingSettings. +func (r RegressionTrainingSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", p.ID) - populate(objectMap, "identity", p.Identity) - populate(objectMap, "location", p.Location) - populate(objectMap, "name", p.Name) - populate(objectMap, "properties", p.Properties) - populate(objectMap, "sku", p.SKU) - populate(objectMap, "systemData", p.SystemData) - populate(objectMap, "tags", p.Tags) - populate(objectMap, "type", p.Type) + populate(objectMap, "allowedTrainingAlgorithms", r.AllowedTrainingAlgorithms) + populate(objectMap, "blockedTrainingAlgorithms", r.BlockedTrainingAlgorithms) + populate(objectMap, "enableDnnTraining", r.EnableDnnTraining) + populate(objectMap, "enableModelExplainability", r.EnableModelExplainability) + populate(objectMap, "enableOnnxCompatibleModels", r.EnableOnnxCompatibleModels) + populate(objectMap, "enableStackEnsemble", r.EnableStackEnsemble) + populate(objectMap, "enableVoteEnsemble", r.EnableVoteEnsemble) + populate(objectMap, "ensembleModelDownloadTimeout", r.EnsembleModelDownloadTimeout) + populate(objectMap, "stackEnsembleSettings", r.StackEnsembleSettings) + populate(objectMap, "trainingMode", r.TrainingMode) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResource. -func (p *PrivateLinkResource) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RegressionTrainingSettings. 
+func (r *RegressionTrainingSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &p.ID) + case "allowedTrainingAlgorithms": + err = unpopulate(val, "AllowedTrainingAlgorithms", &r.AllowedTrainingAlgorithms) delete(rawMsg, key) - case "identity": - err = unpopulate(val, "Identity", &p.Identity) + case "blockedTrainingAlgorithms": + err = unpopulate(val, "BlockedTrainingAlgorithms", &r.BlockedTrainingAlgorithms) delete(rawMsg, key) - case "location": - err = unpopulate(val, "Location", &p.Location) + case "enableDnnTraining": + err = unpopulate(val, "EnableDnnTraining", &r.EnableDnnTraining) delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &p.Name) + case "enableModelExplainability": + err = unpopulate(val, "EnableModelExplainability", &r.EnableModelExplainability) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &p.Properties) + case "enableOnnxCompatibleModels": + err = unpopulate(val, "EnableOnnxCompatibleModels", &r.EnableOnnxCompatibleModels) delete(rawMsg, key) - case "sku": - err = unpopulate(val, "SKU", &p.SKU) + case "enableStackEnsemble": + err = unpopulate(val, "EnableStackEnsemble", &r.EnableStackEnsemble) delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &p.SystemData) + case "enableVoteEnsemble": + err = unpopulate(val, "EnableVoteEnsemble", &r.EnableVoteEnsemble) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &p.Tags) + case "ensembleModelDownloadTimeout": + err = unpopulate(val, "EnsembleModelDownloadTimeout", &r.EnsembleModelDownloadTimeout) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &p.Type) + case "stackEnsembleSettings": + err = unpopulate(val, "StackEnsembleSettings", &r.StackEnsembleSettings) + delete(rawMsg, key) + case "trainingMode": + err = unpopulate(val, "TrainingMode", &r.TrainingMode) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResourceListResult. -func (p PrivateLinkResourceListResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type RequestLogging. +func (r RequestLogging) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "value", p.Value) + populate(objectMap, "captureHeaders", r.CaptureHeaders) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResourceListResult. -func (p *PrivateLinkResourceListResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type RequestLogging. 
+func (r *RequestLogging) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "value": - err = unpopulate(val, "Value", &p.Value) + case "captureHeaders": + err = unpopulate(val, "CaptureHeaders", &r.CaptureHeaders) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResourceProperties. -func (p PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Resource. +func (r Resource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "groupId", p.GroupID) - populate(objectMap, "requiredMembers", p.RequiredMembers) - populate(objectMap, "requiredZoneNames", p.RequiredZoneNames) + populate(objectMap, "id", r.ID) + populate(objectMap, "name", r.Name) + populate(objectMap, "systemData", r.SystemData) + populate(objectMap, "type", r.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResourceProperties. -func (p *PrivateLinkResourceProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Resource. +func (r *Resource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "groupId": - err = unpopulate(val, "GroupID", &p.GroupID) + case "id": + err = unpopulate(val, "ID", &r.ID) delete(rawMsg, key) - case "requiredMembers": - err = unpopulate(val, "RequiredMembers", &p.RequiredMembers) + case "name": + err = unpopulate(val, "Name", &r.Name) delete(rawMsg, key) - case "requiredZoneNames": - err = unpopulate(val, "RequiredZoneNames", &p.RequiredZoneNames) + case "systemData": + err = unpopulate(val, "SystemData", &r.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &r.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PrivateLinkServiceConnectionState. -func (p PrivateLinkServiceConnectionState) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ResourceBase. +func (r ResourceBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "actionsRequired", p.ActionsRequired) - populate(objectMap, "description", p.Description) - populate(objectMap, "status", p.Status) + populate(objectMap, "description", r.Description) + populate(objectMap, "properties", r.Properties) + populate(objectMap, "tags", r.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkServiceConnectionState. 
-func (p *PrivateLinkServiceConnectionState) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceBase. +func (r *ResourceBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "actionsRequired": - err = unpopulate(val, "ActionsRequired", &p.ActionsRequired) - delete(rawMsg, key) case "description": - err = unpopulate(val, "Description", &p.Description) + err = unpopulate(val, "Description", &r.Description) delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &p.Status) + case "properties": + err = unpopulate(val, "Properties", &r.Properties) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &r.Tags) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ProbeSettings. -func (p ProbeSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ResourceConfiguration. +func (r ResourceConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "failureThreshold", p.FailureThreshold) - populate(objectMap, "initialDelay", p.InitialDelay) - populate(objectMap, "period", p.Period) - populate(objectMap, "successThreshold", p.SuccessThreshold) - populate(objectMap, "timeout", p.Timeout) + populate(objectMap, "instanceCount", r.InstanceCount) + populate(objectMap, "instanceType", r.InstanceType) + populate(objectMap, "locations", r.Locations) + populate(objectMap, "maxInstanceCount", r.MaxInstanceCount) + populate(objectMap, "properties", r.Properties) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ProbeSettings. -func (p *ProbeSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceConfiguration. 
+func (r *ResourceConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "failureThreshold": - err = unpopulate(val, "FailureThreshold", &p.FailureThreshold) + case "instanceCount": + err = unpopulate(val, "InstanceCount", &r.InstanceCount) delete(rawMsg, key) - case "initialDelay": - err = unpopulate(val, "InitialDelay", &p.InitialDelay) + case "instanceType": + err = unpopulate(val, "InstanceType", &r.InstanceType) delete(rawMsg, key) - case "period": - err = unpopulate(val, "Period", &p.Period) + case "locations": + err = unpopulate(val, "Locations", &r.Locations) delete(rawMsg, key) - case "successThreshold": - err = unpopulate(val, "SuccessThreshold", &p.SuccessThreshold) + case "maxInstanceCount": + err = unpopulate(val, "MaxInstanceCount", &r.MaxInstanceCount) delete(rawMsg, key) - case "timeout": - err = unpopulate(val, "Timeout", &p.Timeout) + case "properties": + err = unpopulate(val, "Properties", &r.Properties) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PyTorch. -func (p PyTorch) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ResourceID. +func (r ResourceID) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["distributionType"] = DistributionTypePyTorch - populate(objectMap, "processCountPerInstance", p.ProcessCountPerInstance) + populate(objectMap, "id", r.ID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PyTorch. -func (p *PyTorch) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceID. +func (r *ResourceID) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "distributionType": - err = unpopulate(val, "DistributionType", &p.DistributionType) - delete(rawMsg, key) - case "processCountPerInstance": - err = unpopulate(val, "ProcessCountPerInstance", &p.ProcessCountPerInstance) + case "id": + err = unpopulate(val, "ID", &r.ID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", p, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type QuotaBaseProperties. -func (q QuotaBaseProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ResourceName. +func (r ResourceName) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", q.ID) - populate(objectMap, "limit", q.Limit) - populate(objectMap, "type", q.Type) - populate(objectMap, "unit", q.Unit) + populate(objectMap, "localizedValue", r.LocalizedValue) + populate(objectMap, "value", r.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type QuotaBaseProperties. 
-func (q *QuotaBaseProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceName. +func (r *ResourceName) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", q, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &q.ID) - delete(rawMsg, key) - case "limit": - err = unpopulate(val, "Limit", &q.Limit) - delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &q.Type) + case "localizedValue": + err = unpopulate(val, "LocalizedValue", &r.LocalizedValue) delete(rawMsg, key) - case "unit": - err = unpopulate(val, "Unit", &q.Unit) + case "value": + err = unpopulate(val, "Value", &r.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", q, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type QuotaUpdateParameters. -func (q QuotaUpdateParameters) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ResourceQuota. +func (r ResourceQuota) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "location", q.Location) - populate(objectMap, "value", q.Value) + populate(objectMap, "amlWorkspaceLocation", r.AmlWorkspaceLocation) + populate(objectMap, "id", r.ID) + populate(objectMap, "limit", r.Limit) + populate(objectMap, "name", r.Name) + populate(objectMap, "type", r.Type) + populate(objectMap, "unit", r.Unit) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type QuotaUpdateParameters. -func (q *QuotaUpdateParameters) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceQuota. +func (r *ResourceQuota) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", q, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } for key, val := range rawMsg { var err error switch key { - case "location": - err = unpopulate(val, "Location", &q.Location) + case "amlWorkspaceLocation": + err = unpopulate(val, "AmlWorkspaceLocation", &r.AmlWorkspaceLocation) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &q.Value) + case "id": + err = unpopulate(val, "ID", &r.ID) + delete(rawMsg, key) + case "limit": + err = unpopulate(val, "Limit", &r.Limit) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &r.Name) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &r.Type) + delete(rawMsg, key) + case "unit": + err = unpopulate(val, "Unit", &r.Unit) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", q, err) + return fmt.Errorf("unmarshalling type %T: %v", r, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type RandomSamplingAlgorithm. -func (r RandomSamplingAlgorithm) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Route. 
+func (r Route) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "rule", r.Rule) - objectMap["samplingAlgorithmType"] = SamplingAlgorithmTypeRandom - populate(objectMap, "seed", r.Seed) + populate(objectMap, "path", r.Path) + populate(objectMap, "port", r.Port) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type RandomSamplingAlgorithm. -func (r *RandomSamplingAlgorithm) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Route. +func (r *Route) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", r, err) @@ -11699,14 +19362,11 @@ func (r *RandomSamplingAlgorithm) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "rule": - err = unpopulate(val, "Rule", &r.Rule) - delete(rawMsg, key) - case "samplingAlgorithmType": - err = unpopulate(val, "SamplingAlgorithmType", &r.SamplingAlgorithmType) + case "path": + err = unpopulate(val, "Path", &r.Path) delete(rawMsg, key) - case "seed": - err = unpopulate(val, "Seed", &r.Seed) + case "port": + err = unpopulate(val, "Port", &r.Port) delete(rawMsg, key) } if err != nil { @@ -11716,563 +19376,474 @@ func (r *RandomSamplingAlgorithm) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type RecurrenceSchedule. -func (r RecurrenceSchedule) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SASAuthTypeWorkspaceConnectionProperties. +func (s SASAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "hours", r.Hours) - populate(objectMap, "minutes", r.Minutes) - populate(objectMap, "monthDays", r.MonthDays) - populate(objectMap, "weekDays", r.WeekDays) + objectMap["authType"] = ConnectionAuthTypeSAS + populate(objectMap, "category", s.Category) + populate(objectMap, "credentials", s.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", s.ExpiryTime) + populateAny(objectMap, "metadata", s.Metadata) + populate(objectMap, "target", s.Target) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type RecurrenceSchedule. -func (r *RecurrenceSchedule) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SASAuthTypeWorkspaceConnectionProperties. 
+func (s *SASAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "hours": - err = unpopulate(val, "Hours", &r.Hours) + case "authType": + err = unpopulate(val, "AuthType", &s.AuthType) delete(rawMsg, key) - case "minutes": - err = unpopulate(val, "Minutes", &r.Minutes) + case "category": + err = unpopulate(val, "Category", &s.Category) delete(rawMsg, key) - case "monthDays": - err = unpopulate(val, "MonthDays", &r.MonthDays) + case "credentials": + err = unpopulate(val, "Credentials", &s.Credentials) delete(rawMsg, key) - case "weekDays": - err = unpopulate(val, "WeekDays", &r.WeekDays) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &s.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &s.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &s.Target) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type RecurrenceTrigger. -func (r RecurrenceTrigger) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SASCredentialDto. +func (s SASCredentialDto) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "endTime", r.EndTime) - populate(objectMap, "frequency", r.Frequency) - populate(objectMap, "interval", r.Interval) - populate(objectMap, "schedule", r.Schedule) - populate(objectMap, "startTime", r.StartTime) - populate(objectMap, "timeZone", r.TimeZone) - objectMap["triggerType"] = TriggerTypeRecurrence + objectMap["credentialType"] = PendingUploadCredentialTypeSAS + populate(objectMap, "sasUri", s.SasURI) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type RecurrenceTrigger. -func (r *RecurrenceTrigger) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SASCredentialDto. 
+func (s *SASCredentialDto) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error - switch key { - case "endTime": - err = unpopulate(val, "EndTime", &r.EndTime) - delete(rawMsg, key) - case "frequency": - err = unpopulate(val, "Frequency", &r.Frequency) - delete(rawMsg, key) - case "interval": - err = unpopulate(val, "Interval", &r.Interval) - delete(rawMsg, key) - case "schedule": - err = unpopulate(val, "Schedule", &r.Schedule) - delete(rawMsg, key) - case "startTime": - err = unpopulate(val, "StartTime", &r.StartTime) - delete(rawMsg, key) - case "timeZone": - err = unpopulate(val, "TimeZone", &r.TimeZone) + switch key { + case "credentialType": + err = unpopulate(val, "CredentialType", &s.CredentialType) delete(rawMsg, key) - case "triggerType": - err = unpopulate(val, "TriggerType", &r.TriggerType) + case "sasUri": + err = unpopulate(val, "SasURI", &s.SasURI) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type RegenerateEndpointKeysRequest. -func (r RegenerateEndpointKeysRequest) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SKU. +func (s SKU) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "keyType", r.KeyType) - populate(objectMap, "keyValue", r.KeyValue) + populate(objectMap, "capacity", s.Capacity) + populate(objectMap, "family", s.Family) + populate(objectMap, "name", s.Name) + populate(objectMap, "size", s.Size) + populate(objectMap, "tier", s.Tier) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type RegenerateEndpointKeysRequest. -func (r *RegenerateEndpointKeysRequest) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SKU. +func (s *SKU) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "keyType": - err = unpopulate(val, "KeyType", &r.KeyType) + case "capacity": + err = unpopulate(val, "Capacity", &s.Capacity) delete(rawMsg, key) - case "keyValue": - err = unpopulate(val, "KeyValue", &r.KeyValue) + case "family": + err = unpopulate(val, "Family", &s.Family) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &s.Name) + delete(rawMsg, key) + case "size": + err = unpopulate(val, "Size", &s.Size) + delete(rawMsg, key) + case "tier": + err = unpopulate(val, "Tier", &s.Tier) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type RegistryListCredentialsResult. -func (r RegistryListCredentialsResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SKUCapacity. 
+func (s SKUCapacity) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "location", r.Location) - populate(objectMap, "passwords", r.Passwords) - populate(objectMap, "username", r.Username) + populate(objectMap, "default", s.Default) + populate(objectMap, "maximum", s.Maximum) + populate(objectMap, "minimum", s.Minimum) + populate(objectMap, "scaleType", s.ScaleType) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type RegistryListCredentialsResult. -func (r *RegistryListCredentialsResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SKUCapacity. +func (s *SKUCapacity) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "location": - err = unpopulate(val, "Location", &r.Location) + case "default": + err = unpopulate(val, "Default", &s.Default) delete(rawMsg, key) - case "passwords": - err = unpopulate(val, "Passwords", &r.Passwords) + case "maximum": + err = unpopulate(val, "Maximum", &s.Maximum) delete(rawMsg, key) - case "username": - err = unpopulate(val, "Username", &r.Username) + case "minimum": + err = unpopulate(val, "Minimum", &s.Minimum) + delete(rawMsg, key) + case "scaleType": + err = unpopulate(val, "ScaleType", &s.ScaleType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Regression. -func (r Regression) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SKUResource. +func (s SKUResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "cvSplitColumnNames", r.CvSplitColumnNames) - populate(objectMap, "featurizationSettings", r.FeaturizationSettings) - populate(objectMap, "limitSettings", r.LimitSettings) - populate(objectMap, "logVerbosity", r.LogVerbosity) - populate(objectMap, "nCrossValidations", r.NCrossValidations) - populate(objectMap, "primaryMetric", r.PrimaryMetric) - populate(objectMap, "targetColumnName", r.TargetColumnName) - objectMap["taskType"] = TaskTypeRegression - populate(objectMap, "testData", r.TestData) - populate(objectMap, "testDataSize", r.TestDataSize) - populate(objectMap, "trainingData", r.TrainingData) - populate(objectMap, "trainingSettings", r.TrainingSettings) - populate(objectMap, "validationData", r.ValidationData) - populate(objectMap, "validationDataSize", r.ValidationDataSize) - populate(objectMap, "weightColumnName", r.WeightColumnName) + populate(objectMap, "capacity", s.Capacity) + populate(objectMap, "resourceType", s.ResourceType) + populate(objectMap, "sku", s.SKU) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Regression. -func (r *Regression) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SKUResource. 
+func (s *SKUResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "cvSplitColumnNames": - err = unpopulate(val, "CvSplitColumnNames", &r.CvSplitColumnNames) - delete(rawMsg, key) - case "featurizationSettings": - err = unpopulate(val, "FeaturizationSettings", &r.FeaturizationSettings) - delete(rawMsg, key) - case "limitSettings": - err = unpopulate(val, "LimitSettings", &r.LimitSettings) - delete(rawMsg, key) - case "logVerbosity": - err = unpopulate(val, "LogVerbosity", &r.LogVerbosity) - delete(rawMsg, key) - case "nCrossValidations": - r.NCrossValidations, err = unmarshalNCrossValidationsClassification(val) - delete(rawMsg, key) - case "primaryMetric": - err = unpopulate(val, "PrimaryMetric", &r.PrimaryMetric) - delete(rawMsg, key) - case "targetColumnName": - err = unpopulate(val, "TargetColumnName", &r.TargetColumnName) - delete(rawMsg, key) - case "taskType": - err = unpopulate(val, "TaskType", &r.TaskType) - delete(rawMsg, key) - case "testData": - err = unpopulate(val, "TestData", &r.TestData) - delete(rawMsg, key) - case "testDataSize": - err = unpopulate(val, "TestDataSize", &r.TestDataSize) - delete(rawMsg, key) - case "trainingData": - err = unpopulate(val, "TrainingData", &r.TrainingData) - delete(rawMsg, key) - case "trainingSettings": - err = unpopulate(val, "TrainingSettings", &r.TrainingSettings) - delete(rawMsg, key) - case "validationData": - err = unpopulate(val, "ValidationData", &r.ValidationData) + case "capacity": + err = unpopulate(val, "Capacity", &s.Capacity) delete(rawMsg, key) - case "validationDataSize": - err = unpopulate(val, "ValidationDataSize", &r.ValidationDataSize) + case "resourceType": + err = unpopulate(val, "ResourceType", &s.ResourceType) delete(rawMsg, key) - case "weightColumnName": - err = unpopulate(val, "WeightColumnName", &r.WeightColumnName) + case "sku": + err = unpopulate(val, "SKU", &s.SKU) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type RegressionTrainingSettings. -func (r RegressionTrainingSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SKUResourceArmPaginatedResult. 
+func (s SKUResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "allowedTrainingAlgorithms", r.AllowedTrainingAlgorithms) - populate(objectMap, "blockedTrainingAlgorithms", r.BlockedTrainingAlgorithms) - populate(objectMap, "enableDnnTraining", r.EnableDnnTraining) - populate(objectMap, "enableModelExplainability", r.EnableModelExplainability) - populate(objectMap, "enableOnnxCompatibleModels", r.EnableOnnxCompatibleModels) - populate(objectMap, "enableStackEnsemble", r.EnableStackEnsemble) - populate(objectMap, "enableVoteEnsemble", r.EnableVoteEnsemble) - populate(objectMap, "ensembleModelDownloadTimeout", r.EnsembleModelDownloadTimeout) - populate(objectMap, "stackEnsembleSettings", r.StackEnsembleSettings) + populate(objectMap, "nextLink", s.NextLink) + populate(objectMap, "value", s.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type RegressionTrainingSettings. -func (r *RegressionTrainingSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SKUResourceArmPaginatedResult. +func (s *SKUResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "allowedTrainingAlgorithms": - err = unpopulate(val, "AllowedTrainingAlgorithms", &r.AllowedTrainingAlgorithms) - delete(rawMsg, key) - case "blockedTrainingAlgorithms": - err = unpopulate(val, "BlockedTrainingAlgorithms", &r.BlockedTrainingAlgorithms) - delete(rawMsg, key) - case "enableDnnTraining": - err = unpopulate(val, "EnableDnnTraining", &r.EnableDnnTraining) - delete(rawMsg, key) - case "enableModelExplainability": - err = unpopulate(val, "EnableModelExplainability", &r.EnableModelExplainability) - delete(rawMsg, key) - case "enableOnnxCompatibleModels": - err = unpopulate(val, "EnableOnnxCompatibleModels", &r.EnableOnnxCompatibleModels) - delete(rawMsg, key) - case "enableStackEnsemble": - err = unpopulate(val, "EnableStackEnsemble", &r.EnableStackEnsemble) - delete(rawMsg, key) - case "enableVoteEnsemble": - err = unpopulate(val, "EnableVoteEnsemble", &r.EnableVoteEnsemble) - delete(rawMsg, key) - case "ensembleModelDownloadTimeout": - err = unpopulate(val, "EnsembleModelDownloadTimeout", &r.EnsembleModelDownloadTimeout) + case "nextLink": + err = unpopulate(val, "NextLink", &s.NextLink) delete(rawMsg, key) - case "stackEnsembleSettings": - err = unpopulate(val, "StackEnsembleSettings", &r.StackEnsembleSettings) + case "value": + err = unpopulate(val, "Value", &s.Value) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Resource. -func (r Resource) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SKUSetting. 
+func (s SKUSetting) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", r.ID) - populate(objectMap, "name", r.Name) - populate(objectMap, "systemData", r.SystemData) - populate(objectMap, "type", r.Type) + populate(objectMap, "name", s.Name) + populate(objectMap, "tier", s.Tier) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Resource. -func (r *Resource) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SKUSetting. +func (s *SKUSetting) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &r.ID) - delete(rawMsg, key) case "name": - err = unpopulate(val, "Name", &r.Name) - delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &r.SystemData) + err = unpopulate(val, "Name", &s.Name) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &r.Type) + case "tier": + err = unpopulate(val, "Tier", &s.Tier) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ResourceBase. -func (r ResourceBase) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SSLConfiguration. +func (s SSLConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", r.Description) - populate(objectMap, "properties", r.Properties) - populate(objectMap, "tags", r.Tags) + populate(objectMap, "cert", s.Cert) + populate(objectMap, "cname", s.Cname) + populate(objectMap, "key", s.Key) + populate(objectMap, "leafDomainLabel", s.LeafDomainLabel) + populate(objectMap, "overwriteExistingDomain", s.OverwriteExistingDomain) + populate(objectMap, "status", s.Status) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceBase. -func (r *ResourceBase) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SSLConfiguration. 
+func (s *SSLConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &r.Description) + case "cert": + err = unpopulate(val, "Cert", &s.Cert) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &r.Properties) + case "cname": + err = unpopulate(val, "Cname", &s.Cname) delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &r.Tags) + case "key": + err = unpopulate(val, "Key", &s.Key) + delete(rawMsg, key) + case "leafDomainLabel": + err = unpopulate(val, "LeafDomainLabel", &s.LeafDomainLabel) + delete(rawMsg, key) + case "overwriteExistingDomain": + err = unpopulate(val, "OverwriteExistingDomain", &s.OverwriteExistingDomain) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &s.Status) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ResourceConfiguration. -func (r ResourceConfiguration) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SamplingAlgorithm. +func (s SamplingAlgorithm) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "instanceCount", r.InstanceCount) - populate(objectMap, "instanceType", r.InstanceType) - populate(objectMap, "properties", r.Properties) + objectMap["samplingAlgorithmType"] = s.SamplingAlgorithmType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceConfiguration. -func (r *ResourceConfiguration) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SamplingAlgorithm. +func (s *SamplingAlgorithm) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "instanceCount": - err = unpopulate(val, "InstanceCount", &r.InstanceCount) - delete(rawMsg, key) - case "instanceType": - err = unpopulate(val, "InstanceType", &r.InstanceType) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &r.Properties) + case "samplingAlgorithmType": + err = unpopulate(val, "SamplingAlgorithmType", &s.SamplingAlgorithmType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ResourceID. -func (r ResourceID) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SasDatastoreCredentials. +func (s SasDatastoreCredentials) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", r.ID) + objectMap["credentialsType"] = CredentialsTypeSas + populate(objectMap, "secrets", s.Secrets) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceID. 
-func (r *ResourceID) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SasDatastoreCredentials. +func (s *SasDatastoreCredentials) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &r.ID) + case "credentialsType": + err = unpopulate(val, "CredentialsType", &s.CredentialsType) + delete(rawMsg, key) + case "secrets": + err = unpopulate(val, "Secrets", &s.Secrets) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ResourceName. -func (r ResourceName) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SasDatastoreSecrets. +func (s SasDatastoreSecrets) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "localizedValue", r.LocalizedValue) - populate(objectMap, "value", r.Value) + populate(objectMap, "sasToken", s.SasToken) + objectMap["secretsType"] = SecretsTypeSas return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceName. -func (r *ResourceName) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SasDatastoreSecrets. +func (s *SasDatastoreSecrets) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "localizedValue": - err = unpopulate(val, "LocalizedValue", &r.LocalizedValue) + case "sasToken": + err = unpopulate(val, "SasToken", &s.SasToken) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &r.Value) + case "secretsType": + err = unpopulate(val, "SecretsType", &s.SecretsType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ResourceQuota. -func (r ResourceQuota) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScaleSettings. +func (s ScaleSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "amlWorkspaceLocation", r.AmlWorkspaceLocation) - populate(objectMap, "id", r.ID) - populate(objectMap, "limit", r.Limit) - populate(objectMap, "name", r.Name) - populate(objectMap, "type", r.Type) - populate(objectMap, "unit", r.Unit) + populate(objectMap, "maxNodeCount", s.MaxNodeCount) + populate(objectMap, "minNodeCount", s.MinNodeCount) + populate(objectMap, "nodeIdleTimeBeforeScaleDown", s.NodeIdleTimeBeforeScaleDown) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceQuota. -func (r *ResourceQuota) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScaleSettings. 
+func (s *ScaleSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "amlWorkspaceLocation": - err = unpopulate(val, "AmlWorkspaceLocation", &r.AmlWorkspaceLocation) - delete(rawMsg, key) - case "id": - err = unpopulate(val, "ID", &r.ID) - delete(rawMsg, key) - case "limit": - err = unpopulate(val, "Limit", &r.Limit) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &r.Name) + case "maxNodeCount": + err = unpopulate(val, "MaxNodeCount", &s.MaxNodeCount) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &r.Type) + case "minNodeCount": + err = unpopulate(val, "MinNodeCount", &s.MinNodeCount) delete(rawMsg, key) - case "unit": - err = unpopulate(val, "Unit", &r.Unit) + case "nodeIdleTimeBeforeScaleDown": + err = unpopulate(val, "NodeIdleTimeBeforeScaleDown", &s.NodeIdleTimeBeforeScaleDown) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Route. -func (r Route) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScaleSettingsInformation. +func (s ScaleSettingsInformation) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "path", r.Path) - populate(objectMap, "port", r.Port) + populate(objectMap, "scaleSettings", s.ScaleSettings) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Route. -func (r *Route) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScaleSettingsInformation. +func (s *ScaleSettingsInformation) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } for key, val := range rawMsg { var err error switch key { - case "path": - err = unpopulate(val, "Path", &r.Path) - delete(rawMsg, key) - case "port": - err = unpopulate(val, "Port", &r.Port) + case "scaleSettings": + err = unpopulate(val, "ScaleSettings", &s.ScaleSettings) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", r, err) + return fmt.Errorf("unmarshalling type %T: %v", s, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type SASAuthTypeWorkspaceConnectionProperties. -func (s SASAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Schedule. 
+func (s Schedule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["authType"] = ConnectionAuthTypeSAS - populate(objectMap, "category", s.Category) - populate(objectMap, "credentials", s.Credentials) - populate(objectMap, "target", s.Target) - populate(objectMap, "value", s.Value) - populate(objectMap, "valueFormat", s.ValueFormat) + populate(objectMap, "id", s.ID) + populate(objectMap, "name", s.Name) + populate(objectMap, "properties", s.Properties) + populate(objectMap, "systemData", s.SystemData) + populate(objectMap, "type", s.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SASAuthTypeWorkspaceConnectionProperties. -func (s *SASAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Schedule. +func (s *Schedule) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12280,23 +19851,20 @@ func (s *SASAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) er for key, val := range rawMsg { var err error switch key { - case "authType": - err = unpopulate(val, "AuthType", &s.AuthType) - delete(rawMsg, key) - case "category": - err = unpopulate(val, "Category", &s.Category) + case "id": + err = unpopulate(val, "ID", &s.ID) delete(rawMsg, key) - case "credentials": - err = unpopulate(val, "Credentials", &s.Credentials) + case "name": + err = unpopulate(val, "Name", &s.Name) delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &s.Target) + case "properties": + err = unpopulate(val, "Properties", &s.Properties) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &s.Value) + case "systemData": + err = unpopulate(val, "SystemData", &s.SystemData) delete(rawMsg, key) - case "valueFormat": - err = unpopulate(val, "ValueFormat", &s.ValueFormat) + case "type": + err = unpopulate(val, "Type", &s.Type) delete(rawMsg, key) } if err != nil { @@ -12306,19 +19874,15 @@ func (s *SASAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) er return nil } -// MarshalJSON implements the json.Marshaller interface for type SKU. -func (s SKU) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScheduleActionBase. +func (s ScheduleActionBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "capacity", s.Capacity) - populate(objectMap, "family", s.Family) - populate(objectMap, "name", s.Name) - populate(objectMap, "size", s.Size) - populate(objectMap, "tier", s.Tier) + objectMap["actionType"] = s.ActionType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SKU. -func (s *SKU) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleActionBase. 
+func (s *ScheduleActionBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12326,20 +19890,8 @@ func (s *SKU) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "capacity": - err = unpopulate(val, "Capacity", &s.Capacity) - delete(rawMsg, key) - case "family": - err = unpopulate(val, "Family", &s.Family) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &s.Name) - delete(rawMsg, key) - case "size": - err = unpopulate(val, "Size", &s.Size) - delete(rawMsg, key) - case "tier": - err = unpopulate(val, "Tier", &s.Tier) + case "actionType": + err = unpopulate(val, "ActionType", &s.ActionType) delete(rawMsg, key) } if err != nil { @@ -12349,18 +19901,17 @@ func (s *SKU) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SKUCapacity. -func (s SKUCapacity) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScheduleBase. +func (s ScheduleBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "default", s.Default) - populate(objectMap, "maximum", s.Maximum) - populate(objectMap, "minimum", s.Minimum) - populate(objectMap, "scaleType", s.ScaleType) + populate(objectMap, "id", s.ID) + populate(objectMap, "provisioningStatus", s.ProvisioningStatus) + populate(objectMap, "status", s.Status) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SKUCapacity. -func (s *SKUCapacity) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleBase. +func (s *ScheduleBase) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12368,17 +19919,14 @@ func (s *SKUCapacity) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "default": - err = unpopulate(val, "Default", &s.Default) - delete(rawMsg, key) - case "maximum": - err = unpopulate(val, "Maximum", &s.Maximum) + case "id": + err = unpopulate(val, "ID", &s.ID) delete(rawMsg, key) - case "minimum": - err = unpopulate(val, "Minimum", &s.Minimum) + case "provisioningStatus": + err = unpopulate(val, "ProvisioningStatus", &s.ProvisioningStatus) delete(rawMsg, key) - case "scaleType": - err = unpopulate(val, "ScaleType", &s.ScaleType) + case "status": + err = unpopulate(val, "Status", &s.Status) delete(rawMsg, key) } if err != nil { @@ -12388,17 +19936,22 @@ func (s *SKUCapacity) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SKUResource. -func (s SKUResource) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScheduleProperties. 
+func (s ScheduleProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "capacity", s.Capacity) - populate(objectMap, "resourceType", s.ResourceType) - populate(objectMap, "sku", s.SKU) + populate(objectMap, "action", s.Action) + populate(objectMap, "description", s.Description) + populate(objectMap, "displayName", s.DisplayName) + populate(objectMap, "isEnabled", s.IsEnabled) + populate(objectMap, "properties", s.Properties) + populate(objectMap, "provisioningState", s.ProvisioningState) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "trigger", s.Trigger) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SKUResource. -func (s *SKUResource) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleProperties. +func (s *ScheduleProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12406,14 +19959,29 @@ func (s *SKUResource) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "capacity": - err = unpopulate(val, "Capacity", &s.Capacity) + case "action": + s.Action, err = unmarshalScheduleActionBaseClassification(val) delete(rawMsg, key) - case "resourceType": - err = unpopulate(val, "ResourceType", &s.ResourceType) + case "description": + err = unpopulate(val, "Description", &s.Description) delete(rawMsg, key) - case "sku": - err = unpopulate(val, "SKU", &s.SKU) + case "displayName": + err = unpopulate(val, "DisplayName", &s.DisplayName) + delete(rawMsg, key) + case "isEnabled": + err = unpopulate(val, "IsEnabled", &s.IsEnabled) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &s.Properties) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &s.ProvisioningState) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "trigger": + s.Trigger, err = unmarshalTriggerBaseClassification(val) delete(rawMsg, key) } if err != nil { @@ -12423,16 +19991,16 @@ func (s *SKUResource) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SKUResourceArmPaginatedResult. -func (s SKUResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScheduleResourceArmPaginatedResult. +func (s ScheduleResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "nextLink", s.NextLink) populate(objectMap, "value", s.Value) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SKUResourceArmPaginatedResult. -func (s *SKUResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleResourceArmPaginatedResult. +func (s *ScheduleResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12454,16 +20022,18 @@ func (s *SKUResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SKUSetting. 
-func (s SKUSetting) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScriptReference. +func (s ScriptReference) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "name", s.Name) - populate(objectMap, "tier", s.Tier) + populate(objectMap, "scriptArguments", s.ScriptArguments) + populate(objectMap, "scriptData", s.ScriptData) + populate(objectMap, "scriptSource", s.ScriptSource) + populate(objectMap, "timeout", s.Timeout) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SKUSetting. -func (s *SKUSetting) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScriptReference. +func (s *ScriptReference) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12471,11 +20041,17 @@ func (s *SKUSetting) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "name": - err = unpopulate(val, "Name", &s.Name) + case "scriptArguments": + err = unpopulate(val, "ScriptArguments", &s.ScriptArguments) delete(rawMsg, key) - case "tier": - err = unpopulate(val, "Tier", &s.Tier) + case "scriptData": + err = unpopulate(val, "ScriptData", &s.ScriptData) + delete(rawMsg, key) + case "scriptSource": + err = unpopulate(val, "ScriptSource", &s.ScriptSource) + delete(rawMsg, key) + case "timeout": + err = unpopulate(val, "Timeout", &s.Timeout) delete(rawMsg, key) } if err != nil { @@ -12485,20 +20061,16 @@ func (s *SKUSetting) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SSLConfiguration. -func (s SSLConfiguration) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ScriptsToExecute. +func (s ScriptsToExecute) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "cert", s.Cert) - populate(objectMap, "cname", s.Cname) - populate(objectMap, "key", s.Key) - populate(objectMap, "leafDomainLabel", s.LeafDomainLabel) - populate(objectMap, "overwriteExistingDomain", s.OverwriteExistingDomain) - populate(objectMap, "status", s.Status) + populate(objectMap, "creationScript", s.CreationScript) + populate(objectMap, "startupScript", s.StartupScript) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SSLConfiguration. -func (s *SSLConfiguration) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ScriptsToExecute. 
+func (s *ScriptsToExecute) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12506,23 +20078,11 @@ func (s *SSLConfiguration) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "cert": - err = unpopulate(val, "Cert", &s.Cert) - delete(rawMsg, key) - case "cname": - err = unpopulate(val, "Cname", &s.Cname) - delete(rawMsg, key) - case "key": - err = unpopulate(val, "Key", &s.Key) - delete(rawMsg, key) - case "leafDomainLabel": - err = unpopulate(val, "LeafDomainLabel", &s.LeafDomainLabel) - delete(rawMsg, key) - case "overwriteExistingDomain": - err = unpopulate(val, "OverwriteExistingDomain", &s.OverwriteExistingDomain) + case "creationScript": + err = unpopulate(val, "CreationScript", &s.CreationScript) delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &s.Status) + case "startupScript": + err = unpopulate(val, "StartupScript", &s.StartupScript) delete(rawMsg, key) } if err != nil { @@ -12532,15 +20092,15 @@ func (s *SSLConfiguration) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SamplingAlgorithm. -func (s SamplingAlgorithm) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type Seasonality. +func (s Seasonality) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["samplingAlgorithmType"] = s.SamplingAlgorithmType + objectMap["mode"] = s.Mode return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SamplingAlgorithm. -func (s *SamplingAlgorithm) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type Seasonality. +func (s *Seasonality) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12548,8 +20108,8 @@ func (s *SamplingAlgorithm) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "samplingAlgorithmType": - err = unpopulate(val, "SamplingAlgorithmType", &s.SamplingAlgorithmType) + case "mode": + err = unpopulate(val, "Mode", &s.Mode) delete(rawMsg, key) } if err != nil { @@ -12559,16 +20119,16 @@ func (s *SamplingAlgorithm) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SasDatastoreCredentials. -func (s SasDatastoreCredentials) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SecretConfiguration. +func (s SecretConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["credentialsType"] = CredentialsTypeSas - populate(objectMap, "secrets", s.Secrets) + populate(objectMap, "uri", s.URI) + populate(objectMap, "workspaceSecretName", s.WorkspaceSecretName) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SasDatastoreCredentials. -func (s *SasDatastoreCredentials) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretConfiguration. 
+func (s *SecretConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12576,11 +20136,11 @@ func (s *SasDatastoreCredentials) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "credentialsType": - err = unpopulate(val, "CredentialsType", &s.CredentialsType) + case "uri": + err = unpopulate(val, "URI", &s.URI) delete(rawMsg, key) - case "secrets": - err = unpopulate(val, "Secrets", &s.Secrets) + case "workspaceSecretName": + err = unpopulate(val, "WorkspaceSecretName", &s.WorkspaceSecretName) delete(rawMsg, key) } if err != nil { @@ -12590,16 +20150,15 @@ func (s *SasDatastoreCredentials) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SasDatastoreSecrets. -func (s SasDatastoreSecrets) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ServiceManagedResourcesSettings. +func (s ServiceManagedResourcesSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "sasToken", s.SasToken) - objectMap["secretsType"] = SecretsTypeSas + populate(objectMap, "cosmosDb", s.CosmosDb) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SasDatastoreSecrets. -func (s *SasDatastoreSecrets) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ServiceManagedResourcesSettings. +func (s *ServiceManagedResourcesSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12607,11 +20166,8 @@ func (s *SasDatastoreSecrets) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "sasToken": - err = unpopulate(val, "SasToken", &s.SasToken) - delete(rawMsg, key) - case "secretsType": - err = unpopulate(val, "SecretsType", &s.SecretsType) + case "cosmosDb": + err = unpopulate(val, "CosmosDb", &s.CosmosDb) delete(rawMsg, key) } if err != nil { @@ -12621,17 +20177,20 @@ func (s *SasDatastoreSecrets) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScaleSettings. -func (s ScaleSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ServicePrincipalAuthTypeWorkspaceConnectionProperties. +func (s ServicePrincipalAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "maxNodeCount", s.MaxNodeCount) - populate(objectMap, "minNodeCount", s.MinNodeCount) - populate(objectMap, "nodeIdleTimeBeforeScaleDown", s.NodeIdleTimeBeforeScaleDown) + objectMap["authType"] = ConnectionAuthTypeServicePrincipal + populate(objectMap, "category", s.Category) + populate(objectMap, "credentials", s.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", s.ExpiryTime) + populateAny(objectMap, "metadata", s.Metadata) + populate(objectMap, "target", s.Target) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScaleSettings. 
-func (s *ScaleSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ServicePrincipalAuthTypeWorkspaceConnectionProperties. +func (s *ServicePrincipalAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12639,14 +20198,23 @@ func (s *ScaleSettings) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "maxNodeCount": - err = unpopulate(val, "MaxNodeCount", &s.MaxNodeCount) + case "authType": + err = unpopulate(val, "AuthType", &s.AuthType) delete(rawMsg, key) - case "minNodeCount": - err = unpopulate(val, "MinNodeCount", &s.MinNodeCount) + case "category": + err = unpopulate(val, "Category", &s.Category) delete(rawMsg, key) - case "nodeIdleTimeBeforeScaleDown": - err = unpopulate(val, "NodeIdleTimeBeforeScaleDown", &s.NodeIdleTimeBeforeScaleDown) + case "credentials": + err = unpopulate(val, "Credentials", &s.Credentials) + delete(rawMsg, key) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &s.ExpiryTime) + delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &s.Metadata) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &s.Target) delete(rawMsg, key) } if err != nil { @@ -12656,15 +20224,20 @@ func (s *ScaleSettings) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScaleSettingsInformation. -func (s ScaleSettingsInformation) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ServicePrincipalDatastoreCredentials. +func (s ServicePrincipalDatastoreCredentials) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "scaleSettings", s.ScaleSettings) + populate(objectMap, "authorityUrl", s.AuthorityURL) + populate(objectMap, "clientId", s.ClientID) + objectMap["credentialsType"] = CredentialsTypeServicePrincipal + populate(objectMap, "resourceUrl", s.ResourceURL) + populate(objectMap, "secrets", s.Secrets) + populate(objectMap, "tenantId", s.TenantID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScaleSettingsInformation. -func (s *ScaleSettingsInformation) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ServicePrincipalDatastoreCredentials. 
+func (s *ServicePrincipalDatastoreCredentials) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12672,8 +20245,23 @@ func (s *ScaleSettingsInformation) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "scaleSettings": - err = unpopulate(val, "ScaleSettings", &s.ScaleSettings) + case "authorityUrl": + err = unpopulate(val, "AuthorityURL", &s.AuthorityURL) + delete(rawMsg, key) + case "clientId": + err = unpopulate(val, "ClientID", &s.ClientID) + delete(rawMsg, key) + case "credentialsType": + err = unpopulate(val, "CredentialsType", &s.CredentialsType) + delete(rawMsg, key) + case "resourceUrl": + err = unpopulate(val, "ResourceURL", &s.ResourceURL) + delete(rawMsg, key) + case "secrets": + err = unpopulate(val, "Secrets", &s.Secrets) + delete(rawMsg, key) + case "tenantId": + err = unpopulate(val, "TenantID", &s.TenantID) delete(rawMsg, key) } if err != nil { @@ -12683,19 +20271,16 @@ func (s *ScaleSettingsInformation) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type Schedule. -func (s Schedule) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ServicePrincipalDatastoreSecrets. +func (s ServicePrincipalDatastoreSecrets) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", s.ID) - populate(objectMap, "name", s.Name) - populate(objectMap, "properties", s.Properties) - populate(objectMap, "systemData", s.SystemData) - populate(objectMap, "type", s.Type) + populate(objectMap, "clientSecret", s.ClientSecret) + objectMap["secretsType"] = SecretsTypeServicePrincipal return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Schedule. -func (s *Schedule) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ServicePrincipalDatastoreSecrets. +func (s *ServicePrincipalDatastoreSecrets) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12703,20 +20288,11 @@ func (s *Schedule) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &s.ID) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &s.Name) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &s.Properties) - delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &s.SystemData) + case "clientSecret": + err = unpopulate(val, "ClientSecret", &s.ClientSecret) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &s.Type) + case "secretsType": + err = unpopulate(val, "SecretsType", &s.SecretsType) delete(rawMsg, key) } if err != nil { @@ -12726,15 +20302,19 @@ func (s *Schedule) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScheduleActionBase. -func (s ScheduleActionBase) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ServiceTagDestination. 
+func (s ServiceTagDestination) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["actionType"] = s.ActionType + populate(objectMap, "action", s.Action) + populate(objectMap, "addressPrefixes", s.AddressPrefixes) + populate(objectMap, "portRanges", s.PortRanges) + populate(objectMap, "protocol", s.Protocol) + populate(objectMap, "serviceTag", s.ServiceTag) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleActionBase. -func (s *ScheduleActionBase) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ServiceTagDestination. +func (s *ServiceTagDestination) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12742,8 +20322,20 @@ func (s *ScheduleActionBase) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "actionType": - err = unpopulate(val, "ActionType", &s.ActionType) + case "action": + err = unpopulate(val, "Action", &s.Action) + delete(rawMsg, key) + case "addressPrefixes": + err = unpopulate(val, "AddressPrefixes", &s.AddressPrefixes) + delete(rawMsg, key) + case "portRanges": + err = unpopulate(val, "PortRanges", &s.PortRanges) + delete(rawMsg, key) + case "protocol": + err = unpopulate(val, "Protocol", &s.Protocol) + delete(rawMsg, key) + case "serviceTag": + err = unpopulate(val, "ServiceTag", &s.ServiceTag) delete(rawMsg, key) } if err != nil { @@ -12753,17 +20345,18 @@ func (s *ScheduleActionBase) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScheduleBase. -func (s ScheduleBase) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ServiceTagOutboundRule. +func (s ServiceTagOutboundRule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", s.ID) - populate(objectMap, "provisioningStatus", s.ProvisioningStatus) + populate(objectMap, "category", s.Category) + populate(objectMap, "destination", s.Destination) populate(objectMap, "status", s.Status) + objectMap["type"] = RuleTypeServiceTag return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleBase. -func (s *ScheduleBase) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ServiceTagOutboundRule. 
+func (s *ServiceTagOutboundRule) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12771,15 +20364,18 @@ func (s *ScheduleBase) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &s.ID) + case "category": + err = unpopulate(val, "Category", &s.Category) delete(rawMsg, key) - case "provisioningStatus": - err = unpopulate(val, "ProvisioningStatus", &s.ProvisioningStatus) + case "destination": + err = unpopulate(val, "Destination", &s.Destination) delete(rawMsg, key) case "status": err = unpopulate(val, "Status", &s.Status) delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &s.Type) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12788,22 +20384,15 @@ func (s *ScheduleBase) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScheduleProperties. -func (s ScheduleProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SetupScripts. +func (s SetupScripts) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "action", s.Action) - populate(objectMap, "description", s.Description) - populate(objectMap, "displayName", s.DisplayName) - populate(objectMap, "isEnabled", s.IsEnabled) - populate(objectMap, "properties", s.Properties) - populate(objectMap, "provisioningState", s.ProvisioningState) - populate(objectMap, "tags", s.Tags) - populate(objectMap, "trigger", s.Trigger) + populate(objectMap, "scripts", s.Scripts) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleProperties. -func (s *ScheduleProperties) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SetupScripts. +func (s *SetupScripts) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12811,29 +20400,8 @@ func (s *ScheduleProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "action": - s.Action, err = unmarshalScheduleActionBaseClassification(val) - delete(rawMsg, key) - case "description": - err = unpopulate(val, "Description", &s.Description) - delete(rawMsg, key) - case "displayName": - err = unpopulate(val, "DisplayName", &s.DisplayName) - delete(rawMsg, key) - case "isEnabled": - err = unpopulate(val, "IsEnabled", &s.IsEnabled) - delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &s.Properties) - delete(rawMsg, key) - case "provisioningState": - err = unpopulate(val, "ProvisioningState", &s.ProvisioningState) - delete(rawMsg, key) - case "tags": - err = unpopulate(val, "Tags", &s.Tags) - delete(rawMsg, key) - case "trigger": - s.Trigger, err = unmarshalTriggerBaseClassification(val) + case "scripts": + err = unpopulate(val, "Scripts", &s.Scripts) delete(rawMsg, key) } if err != nil { @@ -12843,16 +20411,16 @@ func (s *ScheduleProperties) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScheduleResourceArmPaginatedResult. 
-func (s ScheduleResourceArmPaginatedResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SharedPrivateLinkResource. +func (s SharedPrivateLinkResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "nextLink", s.NextLink) - populate(objectMap, "value", s.Value) + populate(objectMap, "name", s.Name) + populate(objectMap, "properties", s.Properties) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduleResourceArmPaginatedResult. -func (s *ScheduleResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SharedPrivateLinkResource. +func (s *SharedPrivateLinkResource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12860,11 +20428,11 @@ func (s *ScheduleResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "nextLink": - err = unpopulate(val, "NextLink", &s.NextLink) + case "name": + err = unpopulate(val, "Name", &s.Name) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &s.Value) + case "properties": + err = unpopulate(val, "Properties", &s.Properties) delete(rawMsg, key) } if err != nil { @@ -12874,18 +20442,18 @@ func (s *ScheduleResourceArmPaginatedResult) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScriptReference. -func (s ScriptReference) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SharedPrivateLinkResourceProperty. +func (s SharedPrivateLinkResourceProperty) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "scriptArguments", s.ScriptArguments) - populate(objectMap, "scriptData", s.ScriptData) - populate(objectMap, "scriptSource", s.ScriptSource) - populate(objectMap, "timeout", s.Timeout) + populate(objectMap, "groupId", s.GroupID) + populate(objectMap, "privateLinkResourceId", s.PrivateLinkResourceID) + populate(objectMap, "requestMessage", s.RequestMessage) + populate(objectMap, "status", s.Status) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScriptReference. -func (s *ScriptReference) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SharedPrivateLinkResourceProperty. 
+func (s *SharedPrivateLinkResourceProperty) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12893,17 +20461,17 @@ func (s *ScriptReference) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "scriptArguments": - err = unpopulate(val, "ScriptArguments", &s.ScriptArguments) + case "groupId": + err = unpopulate(val, "GroupID", &s.GroupID) delete(rawMsg, key) - case "scriptData": - err = unpopulate(val, "ScriptData", &s.ScriptData) + case "privateLinkResourceId": + err = unpopulate(val, "PrivateLinkResourceID", &s.PrivateLinkResourceID) delete(rawMsg, key) - case "scriptSource": - err = unpopulate(val, "ScriptSource", &s.ScriptSource) + case "requestMessage": + err = unpopulate(val, "RequestMessage", &s.RequestMessage) delete(rawMsg, key) - case "timeout": - err = unpopulate(val, "Timeout", &s.Timeout) + case "status": + err = unpopulate(val, "Status", &s.Status) delete(rawMsg, key) } if err != nil { @@ -12913,16 +20481,41 @@ func (s *ScriptReference) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ScriptsToExecute. -func (s ScriptsToExecute) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SparkJob. +func (s SparkJob) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "creationScript", s.CreationScript) - populate(objectMap, "startupScript", s.StartupScript) + populate(objectMap, "archives", s.Archives) + populate(objectMap, "args", s.Args) + populate(objectMap, "codeId", s.CodeID) + populate(objectMap, "componentId", s.ComponentID) + populate(objectMap, "computeId", s.ComputeID) + populate(objectMap, "conf", s.Conf) + populate(objectMap, "description", s.Description) + populate(objectMap, "displayName", s.DisplayName) + populate(objectMap, "entry", s.Entry) + populate(objectMap, "environmentId", s.EnvironmentID) + populate(objectMap, "experimentName", s.ExperimentName) + populate(objectMap, "files", s.Files) + populate(objectMap, "identity", s.Identity) + populate(objectMap, "inputs", s.Inputs) + populate(objectMap, "isArchived", s.IsArchived) + populate(objectMap, "jars", s.Jars) + objectMap["jobType"] = JobTypeSpark + populate(objectMap, "notificationSetting", s.NotificationSetting) + populate(objectMap, "outputs", s.Outputs) + populate(objectMap, "properties", s.Properties) + populate(objectMap, "pyFiles", s.PyFiles) + populate(objectMap, "queueSettings", s.QueueSettings) + populate(objectMap, "resources", s.Resources) + populate(objectMap, "secretsConfiguration", s.SecretsConfiguration) + populate(objectMap, "services", s.Services) + populate(objectMap, "status", s.Status) + populate(objectMap, "tags", s.Tags) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ScriptsToExecute. -func (s *ScriptsToExecute) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SparkJob. 
+func (s *SparkJob) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12930,11 +20523,86 @@ func (s *ScriptsToExecute) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "creationScript": - err = unpopulate(val, "CreationScript", &s.CreationScript) + case "archives": + err = unpopulate(val, "Archives", &s.Archives) delete(rawMsg, key) - case "startupScript": - err = unpopulate(val, "StartupScript", &s.StartupScript) + case "args": + err = unpopulate(val, "Args", &s.Args) + delete(rawMsg, key) + case "codeId": + err = unpopulate(val, "CodeID", &s.CodeID) + delete(rawMsg, key) + case "componentId": + err = unpopulate(val, "ComponentID", &s.ComponentID) + delete(rawMsg, key) + case "computeId": + err = unpopulate(val, "ComputeID", &s.ComputeID) + delete(rawMsg, key) + case "conf": + err = unpopulate(val, "Conf", &s.Conf) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &s.Description) + delete(rawMsg, key) + case "displayName": + err = unpopulate(val, "DisplayName", &s.DisplayName) + delete(rawMsg, key) + case "entry": + s.Entry, err = unmarshalSparkJobEntryClassification(val) + delete(rawMsg, key) + case "environmentId": + err = unpopulate(val, "EnvironmentID", &s.EnvironmentID) + delete(rawMsg, key) + case "experimentName": + err = unpopulate(val, "ExperimentName", &s.ExperimentName) + delete(rawMsg, key) + case "files": + err = unpopulate(val, "Files", &s.Files) + delete(rawMsg, key) + case "identity": + s.Identity, err = unmarshalIdentityConfigurationClassification(val) + delete(rawMsg, key) + case "inputs": + s.Inputs, err = unmarshalJobInputClassificationMap(val) + delete(rawMsg, key) + case "isArchived": + err = unpopulate(val, "IsArchived", &s.IsArchived) + delete(rawMsg, key) + case "jars": + err = unpopulate(val, "Jars", &s.Jars) + delete(rawMsg, key) + case "jobType": + err = unpopulate(val, "JobType", &s.JobType) + delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &s.NotificationSetting) + delete(rawMsg, key) + case "outputs": + s.Outputs, err = unmarshalJobOutputClassificationMap(val) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &s.Properties) + delete(rawMsg, key) + case "pyFiles": + err = unpopulate(val, "PyFiles", &s.PyFiles) + delete(rawMsg, key) + case "queueSettings": + err = unpopulate(val, "QueueSettings", &s.QueueSettings) + delete(rawMsg, key) + case "resources": + err = unpopulate(val, "Resources", &s.Resources) + delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &s.SecretsConfiguration) + delete(rawMsg, key) + case "services": + err = unpopulate(val, "Services", &s.Services) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &s.Status) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) delete(rawMsg, key) } if err != nil { @@ -12944,15 +20612,15 @@ func (s *ScriptsToExecute) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type Seasonality. -func (s Seasonality) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SparkJobEntry. 
+func (s SparkJobEntry) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - objectMap["mode"] = s.Mode + objectMap["sparkJobEntryType"] = s.SparkJobEntryType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Seasonality. -func (s *Seasonality) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SparkJobEntry. +func (s *SparkJobEntry) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12960,8 +20628,8 @@ func (s *Seasonality) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "mode": - err = unpopulate(val, "Mode", &s.Mode) + case "sparkJobEntryType": + err = unpopulate(val, "SparkJobEntryType", &s.SparkJobEntryType) delete(rawMsg, key) } if err != nil { @@ -12971,15 +20639,16 @@ func (s *Seasonality) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ServiceManagedResourcesSettings. -func (s ServiceManagedResourcesSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SparkJobPythonEntry. +func (s SparkJobPythonEntry) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "cosmosDb", s.CosmosDb) + populate(objectMap, "file", s.File) + objectMap["sparkJobEntryType"] = SparkJobEntryTypeSparkJobPythonEntry return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ServiceManagedResourcesSettings. -func (s *ServiceManagedResourcesSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SparkJobPythonEntry. +func (s *SparkJobPythonEntry) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12987,8 +20656,11 @@ func (s *ServiceManagedResourcesSettings) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "cosmosDb": - err = unpopulate(val, "CosmosDb", &s.CosmosDb) + case "file": + err = unpopulate(val, "File", &s.File) + delete(rawMsg, key) + case "sparkJobEntryType": + err = unpopulate(val, "SparkJobEntryType", &s.SparkJobEntryType) delete(rawMsg, key) } if err != nil { @@ -12998,20 +20670,16 @@ func (s *ServiceManagedResourcesSettings) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ServicePrincipalDatastoreCredentials. -func (s ServicePrincipalDatastoreCredentials) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SparkJobScalaEntry. 
+func (s SparkJobScalaEntry) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "authorityUrl", s.AuthorityURL) - populate(objectMap, "clientId", s.ClientID) - objectMap["credentialsType"] = CredentialsTypeServicePrincipal - populate(objectMap, "resourceUrl", s.ResourceURL) - populate(objectMap, "secrets", s.Secrets) - populate(objectMap, "tenantId", s.TenantID) + populate(objectMap, "className", s.ClassName) + objectMap["sparkJobEntryType"] = SparkJobEntryTypeSparkJobScalaEntry return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ServicePrincipalDatastoreCredentials. -func (s *ServicePrincipalDatastoreCredentials) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SparkJobScalaEntry. +func (s *SparkJobScalaEntry) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -13019,23 +20687,11 @@ func (s *ServicePrincipalDatastoreCredentials) UnmarshalJSON(data []byte) error for key, val := range rawMsg { var err error switch key { - case "authorityUrl": - err = unpopulate(val, "AuthorityURL", &s.AuthorityURL) - delete(rawMsg, key) - case "clientId": - err = unpopulate(val, "ClientID", &s.ClientID) - delete(rawMsg, key) - case "credentialsType": - err = unpopulate(val, "CredentialsType", &s.CredentialsType) - delete(rawMsg, key) - case "resourceUrl": - err = unpopulate(val, "ResourceURL", &s.ResourceURL) - delete(rawMsg, key) - case "secrets": - err = unpopulate(val, "Secrets", &s.Secrets) + case "className": + err = unpopulate(val, "ClassName", &s.ClassName) delete(rawMsg, key) - case "tenantId": - err = unpopulate(val, "TenantID", &s.TenantID) + case "sparkJobEntryType": + err = unpopulate(val, "SparkJobEntryType", &s.SparkJobEntryType) delete(rawMsg, key) } if err != nil { @@ -13045,16 +20701,16 @@ func (s *ServicePrincipalDatastoreCredentials) UnmarshalJSON(data []byte) error return nil } -// MarshalJSON implements the json.Marshaller interface for type ServicePrincipalDatastoreSecrets. -func (s ServicePrincipalDatastoreSecrets) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type SparkResourceConfiguration. +func (s SparkResourceConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "clientSecret", s.ClientSecret) - objectMap["secretsType"] = SecretsTypeServicePrincipal + populate(objectMap, "instanceType", s.InstanceType) + populate(objectMap, "runtimeVersion", s.RuntimeVersion) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ServicePrincipalDatastoreSecrets. -func (s *ServicePrincipalDatastoreSecrets) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type SparkResourceConfiguration. 
+func (s *SparkResourceConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -13062,11 +20718,11 @@ func (s *ServicePrincipalDatastoreSecrets) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "clientSecret": - err = unpopulate(val, "ClientSecret", &s.ClientSecret) + case "instanceType": + err = unpopulate(val, "InstanceType", &s.InstanceType) delete(rawMsg, key) - case "secretsType": - err = unpopulate(val, "SecretsType", &s.SecretsType) + case "runtimeVersion": + err = unpopulate(val, "RuntimeVersion", &s.RuntimeVersion) delete(rawMsg, key) } if err != nil { @@ -13075,16 +20731,18 @@ func (s *ServicePrincipalDatastoreSecrets) UnmarshalJSON(data []byte) error { } return nil } - -// MarshalJSON implements the json.Marshaller interface for type SetupScripts. -func (s SetupScripts) MarshalJSON() ([]byte, error) { + +// MarshalJSON implements the json.Marshaller interface for type StackEnsembleSettings. +func (s StackEnsembleSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "scripts", s.Scripts) + populateAny(objectMap, "stackMetaLearnerKWargs", s.StackMetaLearnerKWargs) + populate(objectMap, "stackMetaLearnerTrainPercentage", s.StackMetaLearnerTrainPercentage) + populate(objectMap, "stackMetaLearnerType", s.StackMetaLearnerType) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SetupScripts. -func (s *SetupScripts) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type StackEnsembleSettings. +func (s *StackEnsembleSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -13092,8 +20750,14 @@ func (s *SetupScripts) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "scripts": - err = unpopulate(val, "Scripts", &s.Scripts) + case "stackMetaLearnerKWargs": + err = unpopulate(val, "StackMetaLearnerKWargs", &s.StackMetaLearnerKWargs) + delete(rawMsg, key) + case "stackMetaLearnerTrainPercentage": + err = unpopulate(val, "StackMetaLearnerTrainPercentage", &s.StackMetaLearnerTrainPercentage) + delete(rawMsg, key) + case "stackMetaLearnerType": + err = unpopulate(val, "StackMetaLearnerType", &s.StackMetaLearnerType) delete(rawMsg, key) } if err != nil { @@ -13103,16 +20767,22 @@ func (s *SetupScripts) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SharedPrivateLinkResource. -func (s SharedPrivateLinkResource) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type StaticInputData. 
+func (s StaticInputData) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "name", s.Name) - populate(objectMap, "properties", s.Properties) + populate(objectMap, "columns", s.Columns) + populate(objectMap, "dataContext", s.DataContext) + objectMap["inputDataType"] = MonitoringInputDataTypeStatic + populate(objectMap, "jobInputType", s.JobInputType) + populate(objectMap, "preprocessingComponentId", s.PreprocessingComponentID) + populate(objectMap, "uri", s.URI) + populateTimeRFC3339(objectMap, "windowEnd", s.WindowEnd) + populateTimeRFC3339(objectMap, "windowStart", s.WindowStart) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SharedPrivateLinkResource. -func (s *SharedPrivateLinkResource) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type StaticInputData. +func (s *StaticInputData) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -13120,11 +20790,29 @@ func (s *SharedPrivateLinkResource) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "name": - err = unpopulate(val, "Name", &s.Name) + case "columns": + err = unpopulate(val, "Columns", &s.Columns) delete(rawMsg, key) - case "properties": - err = unpopulate(val, "Properties", &s.Properties) + case "dataContext": + err = unpopulate(val, "DataContext", &s.DataContext) + delete(rawMsg, key) + case "inputDataType": + err = unpopulate(val, "InputDataType", &s.InputDataType) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &s.JobInputType) + delete(rawMsg, key) + case "preprocessingComponentId": + err = unpopulate(val, "PreprocessingComponentID", &s.PreprocessingComponentID) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &s.URI) + delete(rawMsg, key) + case "windowEnd": + err = unpopulateTimeRFC3339(val, "WindowEnd", &s.WindowEnd) + delete(rawMsg, key) + case "windowStart": + err = unpopulateTimeRFC3339(val, "WindowStart", &s.WindowStart) delete(rawMsg, key) } if err != nil { @@ -13134,18 +20822,18 @@ func (s *SharedPrivateLinkResource) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type SharedPrivateLinkResourceProperty. -func (s SharedPrivateLinkResourceProperty) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type StatusMessage. +func (s StatusMessage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "groupId", s.GroupID) - populate(objectMap, "privateLinkResourceId", s.PrivateLinkResourceID) - populate(objectMap, "requestMessage", s.RequestMessage) - populate(objectMap, "status", s.Status) + populate(objectMap, "code", s.Code) + populateTimeRFC3339(objectMap, "createdDateTime", s.CreatedDateTime) + populate(objectMap, "level", s.Level) + populate(objectMap, "message", s.Message) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SharedPrivateLinkResourceProperty. -func (s *SharedPrivateLinkResourceProperty) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type StatusMessage. 
+func (s *StatusMessage) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -13153,17 +20841,17 @@ func (s *SharedPrivateLinkResourceProperty) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "groupId": - err = unpopulate(val, "GroupID", &s.GroupID) + case "code": + err = unpopulate(val, "Code", &s.Code) delete(rawMsg, key) - case "privateLinkResourceId": - err = unpopulate(val, "PrivateLinkResourceID", &s.PrivateLinkResourceID) + case "createdDateTime": + err = unpopulateTimeRFC3339(val, "CreatedDateTime", &s.CreatedDateTime) delete(rawMsg, key) - case "requestMessage": - err = unpopulate(val, "RequestMessage", &s.RequestMessage) + case "level": + err = unpopulate(val, "Level", &s.Level) delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &s.Status) + case "message": + err = unpopulate(val, "Message", &s.Message) delete(rawMsg, key) } if err != nil { @@ -13173,17 +20861,16 @@ func (s *SharedPrivateLinkResourceProperty) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type StackEnsembleSettings. -func (s StackEnsembleSettings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type StorageAccountDetails. +func (s StorageAccountDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateAny(objectMap, "stackMetaLearnerKWargs", s.StackMetaLearnerKWargs) - populate(objectMap, "stackMetaLearnerTrainPercentage", s.StackMetaLearnerTrainPercentage) - populate(objectMap, "stackMetaLearnerType", s.StackMetaLearnerType) + populate(objectMap, "systemCreatedStorageAccount", s.SystemCreatedStorageAccount) + populate(objectMap, "userCreatedStorageAccount", s.UserCreatedStorageAccount) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type StackEnsembleSettings. -func (s *StackEnsembleSettings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageAccountDetails. 
+func (s *StorageAccountDetails) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -13191,14 +20878,11 @@ func (s *StackEnsembleSettings) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "stackMetaLearnerKWargs": - err = unpopulate(val, "StackMetaLearnerKWargs", &s.StackMetaLearnerKWargs) - delete(rawMsg, key) - case "stackMetaLearnerTrainPercentage": - err = unpopulate(val, "StackMetaLearnerTrainPercentage", &s.StackMetaLearnerTrainPercentage) + case "systemCreatedStorageAccount": + err = unpopulate(val, "SystemCreatedStorageAccount", &s.SystemCreatedStorageAccount) delete(rawMsg, key) - case "stackMetaLearnerType": - err = unpopulate(val, "StackMetaLearnerType", &s.StackMetaLearnerType) + case "userCreatedStorageAccount": + err = unpopulate(val, "UserCreatedStorageAccount", &s.UserCreatedStorageAccount) delete(rawMsg, key) } if err != nil { @@ -13222,11 +20906,14 @@ func (s SweepJob) MarshalJSON() ([]byte, error) { populate(objectMap, "isArchived", s.IsArchived) objectMap["jobType"] = JobTypeSweep populate(objectMap, "limits", s.Limits) + populate(objectMap, "notificationSetting", s.NotificationSetting) populate(objectMap, "objective", s.Objective) populate(objectMap, "outputs", s.Outputs) populate(objectMap, "properties", s.Properties) + populate(objectMap, "queueSettings", s.QueueSettings) populate(objectMap, "samplingAlgorithm", s.SamplingAlgorithm) populateAny(objectMap, "searchSpace", s.SearchSpace) + populate(objectMap, "secretsConfiguration", s.SecretsConfiguration) populate(objectMap, "services", s.Services) populate(objectMap, "status", s.Status) populate(objectMap, "tags", s.Tags) @@ -13276,6 +20963,9 @@ func (s *SweepJob) UnmarshalJSON(data []byte) error { case "limits": err = unpopulate(val, "Limits", &s.Limits) delete(rawMsg, key) + case "notificationSetting": + err = unpopulate(val, "NotificationSetting", &s.NotificationSetting) + delete(rawMsg, key) case "objective": err = unpopulate(val, "Objective", &s.Objective) delete(rawMsg, key) @@ -13285,12 +20975,18 @@ func (s *SweepJob) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &s.Properties) delete(rawMsg, key) + case "queueSettings": + err = unpopulate(val, "QueueSettings", &s.QueueSettings) + delete(rawMsg, key) case "samplingAlgorithm": s.SamplingAlgorithm, err = unmarshalSamplingAlgorithmClassification(val) delete(rawMsg, key) case "searchSpace": err = unpopulate(val, "SearchSpace", &s.SearchSpace) delete(rawMsg, key) + case "secretsConfiguration": + err = unpopulate(val, "SecretsConfiguration", &s.SecretsConfiguration) + delete(rawMsg, key) case "services": err = unpopulate(val, "Services", &s.Services) delete(rawMsg, key) @@ -13458,109 +21154,424 @@ func (s *SynapseSparkProperties) UnmarshalJSON(data []byte) error { case "nodeSize": err = unpopulate(val, "NodeSize", &s.NodeSize) delete(rawMsg, key) - case "nodeSizeFamily": - err = unpopulate(val, "NodeSizeFamily", &s.NodeSizeFamily) + case "nodeSizeFamily": + err = unpopulate(val, "NodeSizeFamily", &s.NodeSizeFamily) + delete(rawMsg, key) + case "poolName": + err = unpopulate(val, "PoolName", &s.PoolName) + delete(rawMsg, key) + case "resourceGroup": + err = unpopulate(val, "ResourceGroup", &s.ResourceGroup) + delete(rawMsg, key) + case "sparkVersion": + err = unpopulate(val, "SparkVersion", &s.SparkVersion) + delete(rawMsg, key) + case 
"subscriptionId": + err = unpopulate(val, "SubscriptionID", &s.SubscriptionID) + delete(rawMsg, key) + case "workspaceName": + err = unpopulate(val, "WorkspaceName", &s.WorkspaceName) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SystemCreatedAcrAccount. +func (s SystemCreatedAcrAccount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "acrAccountName", s.AcrAccountName) + populate(objectMap, "acrAccountSku", s.AcrAccountSKU) + populate(objectMap, "armResourceId", s.ArmResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SystemCreatedAcrAccount. +func (s *SystemCreatedAcrAccount) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "acrAccountName": + err = unpopulate(val, "AcrAccountName", &s.AcrAccountName) + delete(rawMsg, key) + case "acrAccountSku": + err = unpopulate(val, "AcrAccountSKU", &s.AcrAccountSKU) + delete(rawMsg, key) + case "armResourceId": + err = unpopulate(val, "ArmResourceID", &s.ArmResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SystemCreatedStorageAccount. +func (s SystemCreatedStorageAccount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "allowBlobPublicAccess", s.AllowBlobPublicAccess) + populate(objectMap, "armResourceId", s.ArmResourceID) + populate(objectMap, "storageAccountHnsEnabled", s.StorageAccountHnsEnabled) + populate(objectMap, "storageAccountName", s.StorageAccountName) + populate(objectMap, "storageAccountType", s.StorageAccountType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SystemCreatedStorageAccount. +func (s *SystemCreatedStorageAccount) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "allowBlobPublicAccess": + err = unpopulate(val, "AllowBlobPublicAccess", &s.AllowBlobPublicAccess) + delete(rawMsg, key) + case "armResourceId": + err = unpopulate(val, "ArmResourceID", &s.ArmResourceID) + delete(rawMsg, key) + case "storageAccountHnsEnabled": + err = unpopulate(val, "StorageAccountHnsEnabled", &s.StorageAccountHnsEnabled) + delete(rawMsg, key) + case "storageAccountName": + err = unpopulate(val, "StorageAccountName", &s.StorageAccountName) + delete(rawMsg, key) + case "storageAccountType": + err = unpopulate(val, "StorageAccountType", &s.StorageAccountType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SystemData. 
+func (s SystemData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeRFC3339(objectMap, "createdAt", s.CreatedAt) + populate(objectMap, "createdBy", s.CreatedBy) + populate(objectMap, "createdByType", s.CreatedByType) + populateTimeRFC3339(objectMap, "lastModifiedAt", s.LastModifiedAt) + populate(objectMap, "lastModifiedBy", s.LastModifiedBy) + populate(objectMap, "lastModifiedByType", s.LastModifiedByType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SystemData. +func (s *SystemData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "createdAt": + err = unpopulateTimeRFC3339(val, "CreatedAt", &s.CreatedAt) + delete(rawMsg, key) + case "createdBy": + err = unpopulate(val, "CreatedBy", &s.CreatedBy) + delete(rawMsg, key) + case "createdByType": + err = unpopulate(val, "CreatedByType", &s.CreatedByType) + delete(rawMsg, key) + case "lastModifiedAt": + err = unpopulateTimeRFC3339(val, "LastModifiedAt", &s.LastModifiedAt) + delete(rawMsg, key) + case "lastModifiedBy": + err = unpopulate(val, "LastModifiedBy", &s.LastModifiedBy) + delete(rawMsg, key) + case "lastModifiedByType": + err = unpopulate(val, "LastModifiedByType", &s.LastModifiedByType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SystemService. +func (s SystemService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "publicIpAddress", s.PublicIPAddress) + populate(objectMap, "systemServiceType", s.SystemServiceType) + populate(objectMap, "version", s.Version) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SystemService. +func (s *SystemService) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "publicIpAddress": + err = unpopulate(val, "PublicIPAddress", &s.PublicIPAddress) + delete(rawMsg, key) + case "systemServiceType": + err = unpopulate(val, "SystemServiceType", &s.SystemServiceType) + delete(rawMsg, key) + case "version": + err = unpopulate(val, "Version", &s.Version) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type TableFixedParameters. 
+func (t TableFixedParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "booster", t.Booster) + populate(objectMap, "boostingType", t.BoostingType) + populate(objectMap, "growPolicy", t.GrowPolicy) + populate(objectMap, "learningRate", t.LearningRate) + populate(objectMap, "maxBin", t.MaxBin) + populate(objectMap, "maxDepth", t.MaxDepth) + populate(objectMap, "maxLeaves", t.MaxLeaves) + populate(objectMap, "minDataInLeaf", t.MinDataInLeaf) + populate(objectMap, "minSplitGain", t.MinSplitGain) + populate(objectMap, "modelName", t.ModelName) + populate(objectMap, "nEstimators", t.NEstimators) + populate(objectMap, "numLeaves", t.NumLeaves) + populate(objectMap, "preprocessorName", t.PreprocessorName) + populate(objectMap, "regAlpha", t.RegAlpha) + populate(objectMap, "regLambda", t.RegLambda) + populate(objectMap, "subsample", t.Subsample) + populate(objectMap, "subsampleFreq", t.SubsampleFreq) + populate(objectMap, "treeMethod", t.TreeMethod) + populate(objectMap, "withMean", t.WithMean) + populate(objectMap, "withStd", t.WithStd) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TableFixedParameters. +func (t *TableFixedParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "booster": + err = unpopulate(val, "Booster", &t.Booster) + delete(rawMsg, key) + case "boostingType": + err = unpopulate(val, "BoostingType", &t.BoostingType) + delete(rawMsg, key) + case "growPolicy": + err = unpopulate(val, "GrowPolicy", &t.GrowPolicy) + delete(rawMsg, key) + case "learningRate": + err = unpopulate(val, "LearningRate", &t.LearningRate) + delete(rawMsg, key) + case "maxBin": + err = unpopulate(val, "MaxBin", &t.MaxBin) + delete(rawMsg, key) + case "maxDepth": + err = unpopulate(val, "MaxDepth", &t.MaxDepth) + delete(rawMsg, key) + case "maxLeaves": + err = unpopulate(val, "MaxLeaves", &t.MaxLeaves) + delete(rawMsg, key) + case "minDataInLeaf": + err = unpopulate(val, "MinDataInLeaf", &t.MinDataInLeaf) + delete(rawMsg, key) + case "minSplitGain": + err = unpopulate(val, "MinSplitGain", &t.MinSplitGain) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &t.ModelName) + delete(rawMsg, key) + case "nEstimators": + err = unpopulate(val, "NEstimators", &t.NEstimators) + delete(rawMsg, key) + case "numLeaves": + err = unpopulate(val, "NumLeaves", &t.NumLeaves) + delete(rawMsg, key) + case "preprocessorName": + err = unpopulate(val, "PreprocessorName", &t.PreprocessorName) + delete(rawMsg, key) + case "regAlpha": + err = unpopulate(val, "RegAlpha", &t.RegAlpha) delete(rawMsg, key) - case "poolName": - err = unpopulate(val, "PoolName", &s.PoolName) + case "regLambda": + err = unpopulate(val, "RegLambda", &t.RegLambda) delete(rawMsg, key) - case "resourceGroup": - err = unpopulate(val, "ResourceGroup", &s.ResourceGroup) + case "subsample": + err = unpopulate(val, "Subsample", &t.Subsample) delete(rawMsg, key) - case "sparkVersion": - err = unpopulate(val, "SparkVersion", &s.SparkVersion) + case "subsampleFreq": + err = unpopulate(val, "SubsampleFreq", &t.SubsampleFreq) delete(rawMsg, key) - case "subscriptionId": - err = unpopulate(val, "SubscriptionID", &s.SubscriptionID) + case "treeMethod": + err = unpopulate(val, "TreeMethod", &t.TreeMethod) delete(rawMsg, 
key) - case "workspaceName": - err = unpopulate(val, "WorkspaceName", &s.WorkspaceName) + case "withMean": + err = unpopulate(val, "WithMean", &t.WithMean) + delete(rawMsg, key) + case "withStd": + err = unpopulate(val, "WithStd", &t.WithStd) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) + return fmt.Errorf("unmarshalling type %T: %v", t, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type SystemData. -func (s SystemData) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type TableParameterSubspace. +func (t TableParameterSubspace) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "createdAt", s.CreatedAt) - populate(objectMap, "createdBy", s.CreatedBy) - populate(objectMap, "createdByType", s.CreatedByType) - populateTimeRFC3339(objectMap, "lastModifiedAt", s.LastModifiedAt) - populate(objectMap, "lastModifiedBy", s.LastModifiedBy) - populate(objectMap, "lastModifiedByType", s.LastModifiedByType) + populate(objectMap, "booster", t.Booster) + populate(objectMap, "boostingType", t.BoostingType) + populate(objectMap, "growPolicy", t.GrowPolicy) + populate(objectMap, "learningRate", t.LearningRate) + populate(objectMap, "maxBin", t.MaxBin) + populate(objectMap, "maxDepth", t.MaxDepth) + populate(objectMap, "maxLeaves", t.MaxLeaves) + populate(objectMap, "minDataInLeaf", t.MinDataInLeaf) + populate(objectMap, "minSplitGain", t.MinSplitGain) + populate(objectMap, "modelName", t.ModelName) + populate(objectMap, "nEstimators", t.NEstimators) + populate(objectMap, "numLeaves", t.NumLeaves) + populate(objectMap, "preprocessorName", t.PreprocessorName) + populate(objectMap, "regAlpha", t.RegAlpha) + populate(objectMap, "regLambda", t.RegLambda) + populate(objectMap, "subsample", t.Subsample) + populate(objectMap, "subsampleFreq", t.SubsampleFreq) + populate(objectMap, "treeMethod", t.TreeMethod) + populate(objectMap, "withMean", t.WithMean) + populate(objectMap, "withStd", t.WithStd) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SystemData. -func (s *SystemData) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type TableParameterSubspace. 
+func (t *TableParameterSubspace) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) + return fmt.Errorf("unmarshalling type %T: %v", t, err) } for key, val := range rawMsg { var err error switch key { - case "createdAt": - err = unpopulateTimeRFC3339(val, "CreatedAt", &s.CreatedAt) + case "booster": + err = unpopulate(val, "Booster", &t.Booster) delete(rawMsg, key) - case "createdBy": - err = unpopulate(val, "CreatedBy", &s.CreatedBy) + case "boostingType": + err = unpopulate(val, "BoostingType", &t.BoostingType) delete(rawMsg, key) - case "createdByType": - err = unpopulate(val, "CreatedByType", &s.CreatedByType) + case "growPolicy": + err = unpopulate(val, "GrowPolicy", &t.GrowPolicy) delete(rawMsg, key) - case "lastModifiedAt": - err = unpopulateTimeRFC3339(val, "LastModifiedAt", &s.LastModifiedAt) + case "learningRate": + err = unpopulate(val, "LearningRate", &t.LearningRate) delete(rawMsg, key) - case "lastModifiedBy": - err = unpopulate(val, "LastModifiedBy", &s.LastModifiedBy) + case "maxBin": + err = unpopulate(val, "MaxBin", &t.MaxBin) delete(rawMsg, key) - case "lastModifiedByType": - err = unpopulate(val, "LastModifiedByType", &s.LastModifiedByType) + case "maxDepth": + err = unpopulate(val, "MaxDepth", &t.MaxDepth) + delete(rawMsg, key) + case "maxLeaves": + err = unpopulate(val, "MaxLeaves", &t.MaxLeaves) + delete(rawMsg, key) + case "minDataInLeaf": + err = unpopulate(val, "MinDataInLeaf", &t.MinDataInLeaf) + delete(rawMsg, key) + case "minSplitGain": + err = unpopulate(val, "MinSplitGain", &t.MinSplitGain) + delete(rawMsg, key) + case "modelName": + err = unpopulate(val, "ModelName", &t.ModelName) + delete(rawMsg, key) + case "nEstimators": + err = unpopulate(val, "NEstimators", &t.NEstimators) + delete(rawMsg, key) + case "numLeaves": + err = unpopulate(val, "NumLeaves", &t.NumLeaves) + delete(rawMsg, key) + case "preprocessorName": + err = unpopulate(val, "PreprocessorName", &t.PreprocessorName) + delete(rawMsg, key) + case "regAlpha": + err = unpopulate(val, "RegAlpha", &t.RegAlpha) + delete(rawMsg, key) + case "regLambda": + err = unpopulate(val, "RegLambda", &t.RegLambda) + delete(rawMsg, key) + case "subsample": + err = unpopulate(val, "Subsample", &t.Subsample) + delete(rawMsg, key) + case "subsampleFreq": + err = unpopulate(val, "SubsampleFreq", &t.SubsampleFreq) + delete(rawMsg, key) + case "treeMethod": + err = unpopulate(val, "TreeMethod", &t.TreeMethod) + delete(rawMsg, key) + case "withMean": + err = unpopulate(val, "WithMean", &t.WithMean) + delete(rawMsg, key) + case "withStd": + err = unpopulate(val, "WithStd", &t.WithStd) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) + return fmt.Errorf("unmarshalling type %T: %v", t, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type SystemService. -func (s SystemService) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type TableSweepSettings. 
+func (t TableSweepSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "publicIpAddress", s.PublicIPAddress) - populate(objectMap, "systemServiceType", s.SystemServiceType) - populate(objectMap, "version", s.Version) + populate(objectMap, "earlyTermination", t.EarlyTermination) + populate(objectMap, "samplingAlgorithm", t.SamplingAlgorithm) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type SystemService. -func (s *SystemService) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type TableSweepSettings. +func (t *TableSweepSettings) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) + return fmt.Errorf("unmarshalling type %T: %v", t, err) } for key, val := range rawMsg { var err error switch key { - case "publicIpAddress": - err = unpopulate(val, "PublicIPAddress", &s.PublicIPAddress) - delete(rawMsg, key) - case "systemServiceType": - err = unpopulate(val, "SystemServiceType", &s.SystemServiceType) + case "earlyTermination": + t.EarlyTermination, err = unmarshalEarlyTerminationPolicyClassification(val) delete(rawMsg, key) - case "version": - err = unpopulate(val, "Version", &s.Version) + case "samplingAlgorithm": + err = unpopulate(val, "SamplingAlgorithm", &t.SamplingAlgorithm) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) + return fmt.Errorf("unmarshalling type %T: %v", t, err) } } return nil @@ -13571,8 +21582,11 @@ func (t TableVertical) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "cvSplitColumnNames", t.CvSplitColumnNames) populate(objectMap, "featurizationSettings", t.FeaturizationSettings) + populate(objectMap, "fixedParameters", t.FixedParameters) populate(objectMap, "limitSettings", t.LimitSettings) populate(objectMap, "nCrossValidations", t.NCrossValidations) + populate(objectMap, "searchSpace", t.SearchSpace) + populate(objectMap, "sweepSettings", t.SweepSettings) populate(objectMap, "testData", t.TestData) populate(objectMap, "testDataSize", t.TestDataSize) populate(objectMap, "validationData", t.ValidationData) @@ -13596,12 +21610,21 @@ func (t *TableVertical) UnmarshalJSON(data []byte) error { case "featurizationSettings": err = unpopulate(val, "FeaturizationSettings", &t.FeaturizationSettings) delete(rawMsg, key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &t.FixedParameters) + delete(rawMsg, key) case "limitSettings": err = unpopulate(val, "LimitSettings", &t.LimitSettings) delete(rawMsg, key) case "nCrossValidations": t.NCrossValidations, err = unmarshalNCrossValidationsClassification(val) delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &t.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &t.SweepSettings) + delete(rawMsg, key) case "testData": err = unpopulate(val, "TestData", &t.TestData) delete(rawMsg, key) @@ -13679,7 +21702,10 @@ func (t TableVerticalLimitSettings) MarshalJSON() ([]byte, error) { populate(objectMap, "exitScore", t.ExitScore) populate(objectMap, "maxConcurrentTrials", t.MaxConcurrentTrials) populate(objectMap, "maxCoresPerTrial", t.MaxCoresPerTrial) + populate(objectMap, "maxNodes", t.MaxNodes) populate(objectMap, "maxTrials", t.MaxTrials) + populate(objectMap, 
"sweepConcurrentTrials", t.SweepConcurrentTrials) + populate(objectMap, "sweepTrials", t.SweepTrials) populate(objectMap, "timeout", t.Timeout) populate(objectMap, "trialTimeout", t.TrialTimeout) return json.Marshal(objectMap) @@ -13706,9 +21732,18 @@ func (t *TableVerticalLimitSettings) UnmarshalJSON(data []byte) error { case "maxCoresPerTrial": err = unpopulate(val, "MaxCoresPerTrial", &t.MaxCoresPerTrial) delete(rawMsg, key) + case "maxNodes": + err = unpopulate(val, "MaxNodes", &t.MaxNodes) + delete(rawMsg, key) case "maxTrials": err = unpopulate(val, "MaxTrials", &t.MaxTrials) delete(rawMsg, key) + case "sweepConcurrentTrials": + err = unpopulate(val, "SweepConcurrentTrials", &t.SweepConcurrentTrials) + delete(rawMsg, key) + case "sweepTrials": + err = unpopulate(val, "SweepTrials", &t.SweepTrials) + delete(rawMsg, key) case "timeout": err = unpopulate(val, "Timeout", &t.Timeout) delete(rawMsg, key) @@ -13859,9 +21894,12 @@ func (t *TensorFlow) UnmarshalJSON(data []byte) error { func (t TextClassification) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "featurizationSettings", t.FeaturizationSettings) + populate(objectMap, "fixedParameters", t.FixedParameters) populate(objectMap, "limitSettings", t.LimitSettings) populate(objectMap, "logVerbosity", t.LogVerbosity) populate(objectMap, "primaryMetric", t.PrimaryMetric) + populate(objectMap, "searchSpace", t.SearchSpace) + populate(objectMap, "sweepSettings", t.SweepSettings) populate(objectMap, "targetColumnName", t.TargetColumnName) objectMap["taskType"] = TaskTypeTextClassification populate(objectMap, "trainingData", t.TrainingData) @@ -13881,6 +21919,9 @@ func (t *TextClassification) UnmarshalJSON(data []byte) error { case "featurizationSettings": err = unpopulate(val, "FeaturizationSettings", &t.FeaturizationSettings) delete(rawMsg, key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &t.FixedParameters) + delete(rawMsg, key) case "limitSettings": err = unpopulate(val, "LimitSettings", &t.LimitSettings) delete(rawMsg, key) @@ -13890,6 +21931,12 @@ func (t *TextClassification) UnmarshalJSON(data []byte) error { case "primaryMetric": err = unpopulate(val, "PrimaryMetric", &t.PrimaryMetric) delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &t.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &t.SweepSettings) + delete(rawMsg, key) case "targetColumnName": err = unpopulate(val, "TargetColumnName", &t.TargetColumnName) delete(rawMsg, key) @@ -13914,9 +21961,12 @@ func (t *TextClassification) UnmarshalJSON(data []byte) error { func (t TextClassificationMultilabel) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "featurizationSettings", t.FeaturizationSettings) + populate(objectMap, "fixedParameters", t.FixedParameters) populate(objectMap, "limitSettings", t.LimitSettings) populate(objectMap, "logVerbosity", t.LogVerbosity) populate(objectMap, "primaryMetric", t.PrimaryMetric) + populate(objectMap, "searchSpace", t.SearchSpace) + populate(objectMap, "sweepSettings", t.SweepSettings) populate(objectMap, "targetColumnName", t.TargetColumnName) objectMap["taskType"] = TaskTypeTextClassificationMultilabel populate(objectMap, "trainingData", t.TrainingData) @@ -13936,6 +21986,9 @@ func (t *TextClassificationMultilabel) UnmarshalJSON(data []byte) error { case "featurizationSettings": err = unpopulate(val, "FeaturizationSettings", &t.FeaturizationSettings) delete(rawMsg, 
key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &t.FixedParameters) + delete(rawMsg, key) case "limitSettings": err = unpopulate(val, "LimitSettings", &t.LimitSettings) delete(rawMsg, key) @@ -13945,6 +21998,12 @@ func (t *TextClassificationMultilabel) UnmarshalJSON(data []byte) error { case "primaryMetric": err = unpopulate(val, "PrimaryMetric", &t.PrimaryMetric) delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &t.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &t.SweepSettings) + delete(rawMsg, key) case "targetColumnName": err = unpopulate(val, "TargetColumnName", &t.TargetColumnName) delete(rawMsg, key) @@ -13969,9 +22028,12 @@ func (t *TextClassificationMultilabel) UnmarshalJSON(data []byte) error { func (t TextNer) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "featurizationSettings", t.FeaturizationSettings) + populate(objectMap, "fixedParameters", t.FixedParameters) populate(objectMap, "limitSettings", t.LimitSettings) populate(objectMap, "logVerbosity", t.LogVerbosity) populate(objectMap, "primaryMetric", t.PrimaryMetric) + populate(objectMap, "searchSpace", t.SearchSpace) + populate(objectMap, "sweepSettings", t.SweepSettings) populate(objectMap, "targetColumnName", t.TargetColumnName) objectMap["taskType"] = TaskTypeTextNER populate(objectMap, "trainingData", t.TrainingData) @@ -13991,6 +22053,9 @@ func (t *TextNer) UnmarshalJSON(data []byte) error { case "featurizationSettings": err = unpopulate(val, "FeaturizationSettings", &t.FeaturizationSettings) delete(rawMsg, key) + case "fixedParameters": + err = unpopulate(val, "FixedParameters", &t.FixedParameters) + delete(rawMsg, key) case "limitSettings": err = unpopulate(val, "LimitSettings", &t.LimitSettings) delete(rawMsg, key) @@ -14000,6 +22065,12 @@ func (t *TextNer) UnmarshalJSON(data []byte) error { case "primaryMetric": err = unpopulate(val, "PrimaryMetric", &t.PrimaryMetric) delete(rawMsg, key) + case "searchSpace": + err = unpopulate(val, "SearchSpace", &t.SearchSpace) + delete(rawMsg, key) + case "sweepSettings": + err = unpopulate(val, "SweepSettings", &t.SweepSettings) + delete(rawMsg, key) case "targetColumnName": err = unpopulate(val, "TargetColumnName", &t.TargetColumnName) delete(rawMsg, key) @@ -14020,6 +22091,64 @@ func (t *TextNer) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type TmpfsOptions. +func (t TmpfsOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "size", t.Size) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TmpfsOptions. +func (t *TmpfsOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "size": + err = unpopulate(val, "Size", &t.Size) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type TopNFeaturesByAttribution. 
+func (t TopNFeaturesByAttribution) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["filterType"] = MonitoringFeatureFilterTypeTopNByAttribution + populate(objectMap, "top", t.Top) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TopNFeaturesByAttribution. +func (t *TopNFeaturesByAttribution) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "filterType": + err = unpopulate(val, "FilterType", &t.FilterType) + delete(rawMsg, key) + case "top": + err = unpopulate(val, "Top", &t.Top) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type TrackedResource. func (t TrackedResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -14067,6 +22196,61 @@ func (t *TrackedResource) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type TrailingInputData. +func (t TrailingInputData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "columns", t.Columns) + populate(objectMap, "dataContext", t.DataContext) + objectMap["inputDataType"] = MonitoringInputDataTypeTrailing + populate(objectMap, "jobInputType", t.JobInputType) + populate(objectMap, "preprocessingComponentId", t.PreprocessingComponentID) + populate(objectMap, "uri", t.URI) + populate(objectMap, "windowOffset", t.WindowOffset) + populate(objectMap, "windowSize", t.WindowSize) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TrailingInputData. +func (t *TrailingInputData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "columns": + err = unpopulate(val, "Columns", &t.Columns) + delete(rawMsg, key) + case "dataContext": + err = unpopulate(val, "DataContext", &t.DataContext) + delete(rawMsg, key) + case "inputDataType": + err = unpopulate(val, "InputDataType", &t.InputDataType) + delete(rawMsg, key) + case "jobInputType": + err = unpopulate(val, "JobInputType", &t.JobInputType) + delete(rawMsg, key) + case "preprocessingComponentId": + err = unpopulate(val, "PreprocessingComponentID", &t.PreprocessingComponentID) + delete(rawMsg, key) + case "uri": + err = unpopulate(val, "URI", &t.URI) + delete(rawMsg, key) + case "windowOffset": + err = unpopulate(val, "WindowOffset", &t.WindowOffset) + delete(rawMsg, key) + case "windowSize": + err = unpopulate(val, "WindowSize", &t.WindowSize) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type TrainingSettings. 
func (t TrainingSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -14077,6 +22261,7 @@ func (t TrainingSettings) MarshalJSON() ([]byte, error) { populate(objectMap, "enableVoteEnsemble", t.EnableVoteEnsemble) populate(objectMap, "ensembleModelDownloadTimeout", t.EnsembleModelDownloadTimeout) populate(objectMap, "stackEnsembleSettings", t.StackEnsembleSettings) + populate(objectMap, "trainingMode", t.TrainingMode) return json.Marshal(objectMap) } @@ -14110,6 +22295,9 @@ func (t *TrainingSettings) UnmarshalJSON(data []byte) error { case "stackEnsembleSettings": err = unpopulate(val, "StackEnsembleSettings", &t.StackEnsembleSettings) delete(rawMsg, key) + case "trainingMode": + err = unpopulate(val, "TrainingMode", &t.TrainingMode) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", t, err) @@ -14204,6 +22392,37 @@ func (t *TriggerBase) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type TritonInferencingServer. +func (t TritonInferencingServer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "inferenceConfiguration", t.InferenceConfiguration) + objectMap["serverType"] = InferencingServerTypeTriton + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TritonInferencingServer. +func (t *TritonInferencingServer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "inferenceConfiguration": + err = unpopulate(val, "InferenceConfiguration", &t.InferenceConfiguration) + delete(rawMsg, key) + case "serverType": + err = unpopulate(val, "ServerType", &t.ServerType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type TritonModelJobInput. func (t TritonModelJobInput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -14246,6 +22465,9 @@ func (t *TritonModelJobInput) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type TritonModelJobOutput. func (t TritonModelJobOutput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "assetName", t.AssetName) + populate(objectMap, "assetVersion", t.AssetVersion) + populate(objectMap, "autoDeleteSetting", t.AutoDeleteSetting) populate(objectMap, "description", t.Description) objectMap["jobOutputType"] = JobOutputTypeTritonModel populate(objectMap, "mode", t.Mode) @@ -14262,6 +22484,15 @@ func (t *TritonModelJobOutput) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "assetName": + err = unpopulate(val, "AssetName", &t.AssetName) + delete(rawMsg, key) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &t.AssetVersion) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &t.AutoDeleteSetting) + delete(rawMsg, key) case "description": err = unpopulate(val, "Description", &t.Description) delete(rawMsg, key) @@ -14324,12 +22555,15 @@ func (t *TruncationSelectionPolicy) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type URIFileDataVersion. 
func (u URIFileDataVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", u.AutoDeleteSetting) objectMap["dataType"] = DataTypeURIFile populate(objectMap, "dataUri", u.DataURI) populate(objectMap, "description", u.Description) + populate(objectMap, "intellectualProperty", u.IntellectualProperty) populate(objectMap, "isAnonymous", u.IsAnonymous) populate(objectMap, "isArchived", u.IsArchived) populate(objectMap, "properties", u.Properties) + populate(objectMap, "stage", u.Stage) populate(objectMap, "tags", u.Tags) return json.Marshal(objectMap) } @@ -14343,6 +22577,9 @@ func (u *URIFileDataVersion) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &u.AutoDeleteSetting) + delete(rawMsg, key) case "dataType": err = unpopulate(val, "DataType", &u.DataType) delete(rawMsg, key) @@ -14352,6 +22589,9 @@ func (u *URIFileDataVersion) UnmarshalJSON(data []byte) error { case "description": err = unpopulate(val, "Description", &u.Description) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &u.IntellectualProperty) + delete(rawMsg, key) case "isAnonymous": err = unpopulate(val, "IsAnonymous", &u.IsAnonymous) delete(rawMsg, key) @@ -14361,6 +22601,9 @@ func (u *URIFileDataVersion) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &u.Properties) delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &u.Stage) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &u.Tags) delete(rawMsg, key) @@ -14414,6 +22657,9 @@ func (u *URIFileJobInput) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type URIFileJobOutput. func (u URIFileJobOutput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "assetName", u.AssetName) + populate(objectMap, "assetVersion", u.AssetVersion) + populate(objectMap, "autoDeleteSetting", u.AutoDeleteSetting) populate(objectMap, "description", u.Description) objectMap["jobOutputType"] = JobOutputTypeURIFile populate(objectMap, "mode", u.Mode) @@ -14430,6 +22676,15 @@ func (u *URIFileJobOutput) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "assetName": + err = unpopulate(val, "AssetName", &u.AssetName) + delete(rawMsg, key) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &u.AssetVersion) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &u.AutoDeleteSetting) + delete(rawMsg, key) case "description": err = unpopulate(val, "Description", &u.Description) delete(rawMsg, key) @@ -14453,12 +22708,15 @@ func (u *URIFileJobOutput) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type URIFolderDataVersion. 
func (u URIFolderDataVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "autoDeleteSetting", u.AutoDeleteSetting) objectMap["dataType"] = DataTypeURIFolder populate(objectMap, "dataUri", u.DataURI) populate(objectMap, "description", u.Description) + populate(objectMap, "intellectualProperty", u.IntellectualProperty) populate(objectMap, "isAnonymous", u.IsAnonymous) populate(objectMap, "isArchived", u.IsArchived) populate(objectMap, "properties", u.Properties) + populate(objectMap, "stage", u.Stage) populate(objectMap, "tags", u.Tags) return json.Marshal(objectMap) } @@ -14472,6 +22730,9 @@ func (u *URIFolderDataVersion) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &u.AutoDeleteSetting) + delete(rawMsg, key) case "dataType": err = unpopulate(val, "DataType", &u.DataType) delete(rawMsg, key) @@ -14481,6 +22742,9 @@ func (u *URIFolderDataVersion) UnmarshalJSON(data []byte) error { case "description": err = unpopulate(val, "Description", &u.Description) delete(rawMsg, key) + case "intellectualProperty": + err = unpopulate(val, "IntellectualProperty", &u.IntellectualProperty) + delete(rawMsg, key) case "isAnonymous": err = unpopulate(val, "IsAnonymous", &u.IsAnonymous) delete(rawMsg, key) @@ -14490,6 +22754,9 @@ func (u *URIFolderDataVersion) UnmarshalJSON(data []byte) error { case "properties": err = unpopulate(val, "Properties", &u.Properties) delete(rawMsg, key) + case "stage": + err = unpopulate(val, "Stage", &u.Stage) + delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &u.Tags) delete(rawMsg, key) @@ -14543,6 +22810,9 @@ func (u *URIFolderJobInput) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type URIFolderJobOutput. func (u URIFolderJobOutput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "assetName", u.AssetName) + populate(objectMap, "assetVersion", u.AssetVersion) + populate(objectMap, "autoDeleteSetting", u.AutoDeleteSetting) populate(objectMap, "description", u.Description) objectMap["jobOutputType"] = JobOutputTypeURIFolder populate(objectMap, "mode", u.Mode) @@ -14559,6 +22829,15 @@ func (u *URIFolderJobOutput) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "assetName": + err = unpopulate(val, "AssetName", &u.AssetName) + delete(rawMsg, key) + case "assetVersion": + err = unpopulate(val, "AssetVersion", &u.AssetVersion) + delete(rawMsg, key) + case "autoDeleteSetting": + err = unpopulate(val, "AutoDeleteSetting", &u.AutoDeleteSetting) + delete(rawMsg, key) case "description": err = unpopulate(val, "Description", &u.Description) delete(rawMsg, key) @@ -14787,11 +23066,65 @@ func (u *UserAssignedIdentity) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "clientId": - err = unpopulate(val, "ClientID", &u.ClientID) - delete(rawMsg, key) - case "principalId": - err = unpopulate(val, "PrincipalID", &u.PrincipalID) + case "clientId": + err = unpopulate(val, "ClientID", &u.ClientID) + delete(rawMsg, key) + case "principalId": + err = unpopulate(val, "PrincipalID", &u.PrincipalID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UserCreatedAcrAccount. 
+func (u UserCreatedAcrAccount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "armResourceId", u.ArmResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserCreatedAcrAccount. +func (u *UserCreatedAcrAccount) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "armResourceId": + err = unpopulate(val, "ArmResourceID", &u.ArmResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UserCreatedStorageAccount. +func (u UserCreatedStorageAccount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "armResourceId", u.ArmResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserCreatedStorageAccount. +func (u *UserCreatedStorageAccount) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "armResourceId": + err = unpopulate(val, "ArmResourceID", &u.ArmResourceID) delete(rawMsg, key) } if err != nil { @@ -14834,9 +23167,9 @@ func (u UsernamePasswordAuthTypeWorkspaceConnectionProperties) MarshalJSON() ([] objectMap["authType"] = ConnectionAuthTypeUsernamePassword populate(objectMap, "category", u.Category) populate(objectMap, "credentials", u.Credentials) + populateTimeRFC3339(objectMap, "expiryTime", u.ExpiryTime) + populateAny(objectMap, "metadata", u.Metadata) populate(objectMap, "target", u.Target) - populate(objectMap, "value", u.Value) - populate(objectMap, "valueFormat", u.ValueFormat) return json.Marshal(objectMap) } @@ -14858,14 +23191,14 @@ func (u *UsernamePasswordAuthTypeWorkspaceConnectionProperties) UnmarshalJSON(da case "credentials": err = unpopulate(val, "Credentials", &u.Credentials) delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &u.Target) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &u.ExpiryTime) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &u.Value) + case "metadata": + err = unpopulate(val, "Metadata", &u.Metadata) delete(rawMsg, key) - case "valueFormat": - err = unpopulate(val, "ValueFormat", &u.ValueFormat) + case "target": + err = unpopulate(val, "Target", &u.Target) delete(rawMsg, key) } if err != nil { @@ -15234,11 +23567,125 @@ func (v *VirtualMachineSizeListResult) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type VolumeDefinition. 
+func (v VolumeDefinition) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "bind", v.Bind) + populate(objectMap, "consistency", v.Consistency) + populate(objectMap, "readOnly", v.ReadOnly) + populate(objectMap, "source", v.Source) + populate(objectMap, "target", v.Target) + populate(objectMap, "tmpfs", v.Tmpfs) + populate(objectMap, "type", v.Type) + populate(objectMap, "volume", v.Volume) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VolumeDefinition. +func (v *VolumeDefinition) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "bind": + err = unpopulate(val, "Bind", &v.Bind) + delete(rawMsg, key) + case "consistency": + err = unpopulate(val, "Consistency", &v.Consistency) + delete(rawMsg, key) + case "readOnly": + err = unpopulate(val, "ReadOnly", &v.ReadOnly) + delete(rawMsg, key) + case "source": + err = unpopulate(val, "Source", &v.Source) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &v.Target) + delete(rawMsg, key) + case "tmpfs": + err = unpopulate(val, "Tmpfs", &v.Tmpfs) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &v.Type) + delete(rawMsg, key) + case "volume": + err = unpopulate(val, "Volume", &v.Volume) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VolumeOptions. +func (v VolumeOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nocopy", v.Nocopy) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VolumeOptions. +func (v *VolumeOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nocopy": + err = unpopulate(val, "Nocopy", &v.Nocopy) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Webhook. +func (w Webhook) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "eventType", w.EventType) + objectMap["webhookType"] = w.WebhookType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Webhook. +func (w *Webhook) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "eventType": + err = unpopulate(val, "EventType", &w.EventType) + delete(rawMsg, key) + case "webhookType": + err = unpopulate(val, "WebhookType", &w.WebhookType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type Workspace. 
func (w Workspace) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "id", w.ID) populate(objectMap, "identity", w.Identity) + populate(objectMap, "kind", w.Kind) populate(objectMap, "location", w.Location) populate(objectMap, "name", w.Name) populate(objectMap, "properties", w.Properties) @@ -15264,6 +23711,9 @@ func (w *Workspace) UnmarshalJSON(data []byte) error { case "identity": err = unpopulate(val, "Identity", &w.Identity) delete(rawMsg, key) + case "kind": + err = unpopulate(val, "Kind", &w.Kind) + delete(rawMsg, key) case "location": err = unpopulate(val, "Location", &w.Location) delete(rawMsg, key) @@ -15293,6 +23743,64 @@ func (w *Workspace) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionAPIKey. +func (w WorkspaceConnectionAPIKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "key", w.Key) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WorkspaceConnectionAPIKey. +func (w *WorkspaceConnectionAPIKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "key": + err = unpopulate(val, "Key", &w.Key) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionAccessKey. +func (w WorkspaceConnectionAccessKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "accessKeyId", w.AccessKeyID) + populate(objectMap, "secretAccessKey", w.SecretAccessKey) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WorkspaceConnectionAccessKey. +func (w *WorkspaceConnectionAccessKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "accessKeyId": + err = unpopulate(val, "AccessKeyID", &w.AccessKeyID) + delete(rawMsg, key) + case "secretAccessKey": + err = unpopulate(val, "SecretAccessKey", &w.SecretAccessKey) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionManagedIdentity. 
func (w WorkspaceConnectionManagedIdentity) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -15356,9 +23864,9 @@ func (w WorkspaceConnectionPropertiesV2) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) objectMap["authType"] = w.AuthType populate(objectMap, "category", w.Category) + populateTimeRFC3339(objectMap, "expiryTime", w.ExpiryTime) + populateAny(objectMap, "metadata", w.Metadata) populate(objectMap, "target", w.Target) - populate(objectMap, "value", w.Value) - populate(objectMap, "valueFormat", w.ValueFormat) return json.Marshal(objectMap) } @@ -15377,14 +23885,14 @@ func (w *WorkspaceConnectionPropertiesV2) UnmarshalJSON(data []byte) error { case "category": err = unpopulate(val, "Category", &w.Category) delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &w.Target) + case "expiryTime": + err = unpopulateTimeRFC3339(val, "ExpiryTime", &w.ExpiryTime) delete(rawMsg, key) - case "value": - err = unpopulate(val, "Value", &w.Value) + case "metadata": + err = unpopulate(val, "Metadata", &w.Metadata) delete(rawMsg, key) - case "valueFormat": - err = unpopulate(val, "ValueFormat", &w.ValueFormat) + case "target": + err = unpopulate(val, "Target", &w.Target) delete(rawMsg, key) } if err != nil { @@ -15468,6 +23976,41 @@ func (w *WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult) Unmarsh return nil } +// MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionServicePrincipal. +func (w WorkspaceConnectionServicePrincipal) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "clientId", w.ClientID) + populate(objectMap, "clientSecret", w.ClientSecret) + populate(objectMap, "tenantId", w.TenantID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WorkspaceConnectionServicePrincipal. +func (w *WorkspaceConnectionServicePrincipal) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "clientId": + err = unpopulate(val, "ClientID", &w.ClientID) + delete(rawMsg, key) + case "clientSecret": + err = unpopulate(val, "ClientSecret", &w.ClientSecret) + delete(rawMsg, key) + case "tenantId": + err = unpopulate(val, "TenantID", &w.TenantID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionSharedAccessSignature. func (w WorkspaceConnectionSharedAccessSignature) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -15495,6 +24038,33 @@ func (w *WorkspaceConnectionSharedAccessSignature) UnmarshalJSON(data []byte) er return nil } +// MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionUpdateParameter. +func (w WorkspaceConnectionUpdateParameter) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "properties", w.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WorkspaceConnectionUpdateParameter. 
+func (w *WorkspaceConnectionUpdateParameter) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "properties": + w.Properties, err = unmarshalWorkspaceConnectionPropertiesV2Classification(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type WorkspaceConnectionUsernamePassword. func (w WorkspaceConnectionUsernamePassword) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -15526,6 +24096,37 @@ func (w *WorkspaceConnectionUsernamePassword) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type WorkspaceHubConfig. +func (w WorkspaceHubConfig) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "additionalWorkspaceStorageAccounts", w.AdditionalWorkspaceStorageAccounts) + populate(objectMap, "defaultWorkspaceResourceGroup", w.DefaultWorkspaceResourceGroup) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WorkspaceHubConfig. +func (w *WorkspaceHubConfig) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "additionalWorkspaceStorageAccounts": + err = unpopulate(val, "AdditionalWorkspaceStorageAccounts", &w.AdditionalWorkspaceStorageAccounts) + delete(rawMsg, key) + case "defaultWorkspaceResourceGroup": + err = unpopulate(val, "DefaultWorkspaceResourceGroup", &w.DefaultWorkspaceResourceGroup) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type WorkspaceListResult. func (w WorkspaceListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -15557,19 +24158,58 @@ func (w *WorkspaceListResult) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type WorkspacePrivateEndpointResource. +func (w WorkspacePrivateEndpointResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", w.ID) + populate(objectMap, "subnetArmId", w.SubnetArmID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WorkspacePrivateEndpointResource. +func (w *WorkspacePrivateEndpointResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &w.ID) + delete(rawMsg, key) + case "subnetArmId": + err = unpopulate(val, "SubnetArmID", &w.SubnetArmID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type WorkspaceProperties. 
func (w WorkspaceProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "allowPublicAccessWhenBehindVnet", w.AllowPublicAccessWhenBehindVnet) populate(objectMap, "applicationInsights", w.ApplicationInsights) + populate(objectMap, "associatedWorkspaces", w.AssociatedWorkspaces) + populate(objectMap, "containerRegistries", w.ContainerRegistries) populate(objectMap, "containerRegistry", w.ContainerRegistry) populate(objectMap, "description", w.Description) populate(objectMap, "discoveryUrl", w.DiscoveryURL) + populate(objectMap, "enableDataIsolation", w.EnableDataIsolation) populate(objectMap, "encryption", w.Encryption) + populate(objectMap, "existingWorkspaces", w.ExistingWorkspaces) + populate(objectMap, "featureStoreSettings", w.FeatureStoreSettings) populate(objectMap, "friendlyName", w.FriendlyName) populate(objectMap, "hbiWorkspace", w.HbiWorkspace) + populate(objectMap, "hubResourceId", w.HubResourceID) populate(objectMap, "imageBuildCompute", w.ImageBuildCompute) populate(objectMap, "keyVault", w.KeyVault) + populate(objectMap, "keyVaults", w.KeyVaults) + populate(objectMap, "managedNetwork", w.ManagedNetwork) populate(objectMap, "mlFlowTrackingUri", w.MlFlowTrackingURI) populate(objectMap, "notebookInfo", w.NotebookInfo) populate(objectMap, "primaryUserAssignedIdentity", w.PrimaryUserAssignedIdentity) @@ -15580,10 +24220,14 @@ func (w WorkspaceProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "serviceManagedResourcesSettings", w.ServiceManagedResourcesSettings) populate(objectMap, "serviceProvisionedResourceGroup", w.ServiceProvisionedResourceGroup) populate(objectMap, "sharedPrivateLinkResources", w.SharedPrivateLinkResources) + populate(objectMap, "softDeleteRetentionInDays", w.SoftDeleteRetentionInDays) populate(objectMap, "storageAccount", w.StorageAccount) + populate(objectMap, "storageAccounts", w.StorageAccounts) populate(objectMap, "storageHnsEnabled", w.StorageHnsEnabled) + populate(objectMap, "systemDatastoresAuthMode", w.SystemDatastoresAuthMode) populate(objectMap, "tenantId", w.TenantID) populate(objectMap, "v1LegacyMode", w.V1LegacyMode) + populate(objectMap, "workspaceHubConfig", w.WorkspaceHubConfig) populate(objectMap, "workspaceId", w.WorkspaceID) return json.Marshal(objectMap) } @@ -15603,6 +24247,12 @@ func (w *WorkspaceProperties) UnmarshalJSON(data []byte) error { case "applicationInsights": err = unpopulate(val, "ApplicationInsights", &w.ApplicationInsights) delete(rawMsg, key) + case "associatedWorkspaces": + err = unpopulate(val, "AssociatedWorkspaces", &w.AssociatedWorkspaces) + delete(rawMsg, key) + case "containerRegistries": + err = unpopulate(val, "ContainerRegistries", &w.ContainerRegistries) + delete(rawMsg, key) case "containerRegistry": err = unpopulate(val, "ContainerRegistry", &w.ContainerRegistry) delete(rawMsg, key) @@ -15612,21 +24262,39 @@ func (w *WorkspaceProperties) UnmarshalJSON(data []byte) error { case "discoveryUrl": err = unpopulate(val, "DiscoveryURL", &w.DiscoveryURL) delete(rawMsg, key) + case "enableDataIsolation": + err = unpopulate(val, "EnableDataIsolation", &w.EnableDataIsolation) + delete(rawMsg, key) case "encryption": err = unpopulate(val, "Encryption", &w.Encryption) delete(rawMsg, key) + case "existingWorkspaces": + err = unpopulate(val, "ExistingWorkspaces", &w.ExistingWorkspaces) + delete(rawMsg, key) + case "featureStoreSettings": + err = unpopulate(val, "FeatureStoreSettings", &w.FeatureStoreSettings) + delete(rawMsg, key) case "friendlyName": err = 
unpopulate(val, "FriendlyName", &w.FriendlyName) delete(rawMsg, key) case "hbiWorkspace": err = unpopulate(val, "HbiWorkspace", &w.HbiWorkspace) delete(rawMsg, key) + case "hubResourceId": + err = unpopulate(val, "HubResourceID", &w.HubResourceID) + delete(rawMsg, key) case "imageBuildCompute": err = unpopulate(val, "ImageBuildCompute", &w.ImageBuildCompute) delete(rawMsg, key) case "keyVault": err = unpopulate(val, "KeyVault", &w.KeyVault) delete(rawMsg, key) + case "keyVaults": + err = unpopulate(val, "KeyVaults", &w.KeyVaults) + delete(rawMsg, key) + case "managedNetwork": + err = unpopulate(val, "ManagedNetwork", &w.ManagedNetwork) + delete(rawMsg, key) case "mlFlowTrackingUri": err = unpopulate(val, "MlFlowTrackingURI", &w.MlFlowTrackingURI) delete(rawMsg, key) @@ -15657,18 +24325,30 @@ func (w *WorkspaceProperties) UnmarshalJSON(data []byte) error { case "sharedPrivateLinkResources": err = unpopulate(val, "SharedPrivateLinkResources", &w.SharedPrivateLinkResources) delete(rawMsg, key) + case "softDeleteRetentionInDays": + err = unpopulate(val, "SoftDeleteRetentionInDays", &w.SoftDeleteRetentionInDays) + delete(rawMsg, key) case "storageAccount": err = unpopulate(val, "StorageAccount", &w.StorageAccount) delete(rawMsg, key) + case "storageAccounts": + err = unpopulate(val, "StorageAccounts", &w.StorageAccounts) + delete(rawMsg, key) case "storageHnsEnabled": err = unpopulate(val, "StorageHnsEnabled", &w.StorageHnsEnabled) delete(rawMsg, key) + case "systemDatastoresAuthMode": + err = unpopulate(val, "SystemDatastoresAuthMode", &w.SystemDatastoresAuthMode) + delete(rawMsg, key) case "tenantId": err = unpopulate(val, "TenantID", &w.TenantID) delete(rawMsg, key) case "v1LegacyMode": err = unpopulate(val, "V1LegacyMode", &w.V1LegacyMode) delete(rawMsg, key) + case "workspaceHubConfig": + err = unpopulate(val, "WorkspaceHubConfig", &w.WorkspaceHubConfig) + delete(rawMsg, key) case "workspaceId": err = unpopulate(val, "WorkspaceID", &w.WorkspaceID) delete(rawMsg, key) @@ -15686,11 +24366,17 @@ func (w WorkspacePropertiesUpdateParameters) MarshalJSON() ([]byte, error) { populate(objectMap, "applicationInsights", w.ApplicationInsights) populate(objectMap, "containerRegistry", w.ContainerRegistry) populate(objectMap, "description", w.Description) + populate(objectMap, "enableDataIsolation", w.EnableDataIsolation) + populate(objectMap, "encryption", w.Encryption) + populate(objectMap, "featureStoreSettings", w.FeatureStoreSettings) populate(objectMap, "friendlyName", w.FriendlyName) populate(objectMap, "imageBuildCompute", w.ImageBuildCompute) + populate(objectMap, "managedNetwork", w.ManagedNetwork) populate(objectMap, "primaryUserAssignedIdentity", w.PrimaryUserAssignedIdentity) populate(objectMap, "publicNetworkAccess", w.PublicNetworkAccess) populate(objectMap, "serviceManagedResourcesSettings", w.ServiceManagedResourcesSettings) + populate(objectMap, "softDeleteRetentionInDays", w.SoftDeleteRetentionInDays) + populate(objectMap, "v1LegacyMode", w.V1LegacyMode) return json.Marshal(objectMap) } @@ -15712,12 +24398,24 @@ func (w *WorkspacePropertiesUpdateParameters) UnmarshalJSON(data []byte) error { case "description": err = unpopulate(val, "Description", &w.Description) delete(rawMsg, key) + case "enableDataIsolation": + err = unpopulate(val, "EnableDataIsolation", &w.EnableDataIsolation) + delete(rawMsg, key) + case "encryption": + err = unpopulate(val, "Encryption", &w.Encryption) + delete(rawMsg, key) + case "featureStoreSettings": + err = unpopulate(val, "FeatureStoreSettings", 
&w.FeatureStoreSettings) + delete(rawMsg, key) case "friendlyName": err = unpopulate(val, "FriendlyName", &w.FriendlyName) delete(rawMsg, key) case "imageBuildCompute": err = unpopulate(val, "ImageBuildCompute", &w.ImageBuildCompute) delete(rawMsg, key) + case "managedNetwork": + err = unpopulate(val, "ManagedNetwork", &w.ManagedNetwork) + delete(rawMsg, key) case "primaryUserAssignedIdentity": err = unpopulate(val, "PrimaryUserAssignedIdentity", &w.PrimaryUserAssignedIdentity) delete(rawMsg, key) @@ -15727,6 +24425,12 @@ func (w *WorkspacePropertiesUpdateParameters) UnmarshalJSON(data []byte) error { case "serviceManagedResourcesSettings": err = unpopulate(val, "ServiceManagedResourcesSettings", &w.ServiceManagedResourcesSettings) delete(rawMsg, key) + case "softDeleteRetentionInDays": + err = unpopulate(val, "SoftDeleteRetentionInDays", &w.SoftDeleteRetentionInDays) + delete(rawMsg, key) + case "v1LegacyMode": + err = unpopulate(val, "V1LegacyMode", &w.V1LegacyMode) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", w, err) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client.go index e8e3ab245fb7..2bf6b515f16d 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client.go @@ -48,7 +48,7 @@ func NewModelVersionsClient(subscriptionID string, credential azcore.TokenCreden // CreateOrUpdate - Create or update version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -99,7 +99,7 @@ func (client *ModelVersionsClient) createOrUpdateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -117,7 +117,7 @@ func (client *ModelVersionsClient) createOrUpdateHandleResponse(resp *http.Respo // Delete - Delete version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -166,7 +166,7 @@ func (client *ModelVersionsClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -175,7 +175,7 @@ func (client *ModelVersionsClient) deleteCreateRequest(ctx context.Context, reso // Get - Get version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. 
The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Container name. This is case-sensitive. @@ -224,7 +224,7 @@ func (client *ModelVersionsClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -241,7 +241,7 @@ func (client *ModelVersionsClient) getHandleResponse(resp *http.Response) (Model // NewListPager - List model versions. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Model name. This is case-sensitive. @@ -298,7 +298,7 @@ func (client *ModelVersionsClient) listCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } @@ -329,6 +329,9 @@ func (client *ModelVersionsClient) listCreateRequest(ctx context.Context, resour if options != nil && options.ListViewType != nil { reqQP.Set("listViewType", string(*options.ListViewType)) } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -342,3 +345,81 @@ func (client *ModelVersionsClient) listHandleResponse(resp *http.Response) (Mode } return result, nil } + +// BeginPackage - Model Version Package operation. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - name - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Package operation request body. +// - options - ModelVersionsClientBeginPackageOptions contains the optional parameters for the ModelVersionsClient.BeginPackage +// method. +func (client *ModelVersionsClient) BeginPackage(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body PackageRequest, options *ModelVersionsClientBeginPackageOptions) (*runtime.Poller[ModelVersionsClientPackageResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.packageOperation(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ModelVersionsClientPackageResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[ModelVersionsClientPackageResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Package - Model Version Package operation. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *ModelVersionsClient) packageOperation(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body PackageRequest, options *ModelVersionsClientBeginPackageOptions) (*http.Response, error) { + req, err := client.packageCreateRequest(ctx, resourceGroupName, workspaceName, name, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// packageCreateRequest creates the Package request. +func (client *ModelVersionsClient) packageCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, name string, version string, body PackageRequest, options *ModelVersionsClientBeginPackageOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/package" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client_example_test.go deleted file mode 100644 index c327dab98706..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/modelversions_client_example_test.go +++ /dev/null @@ -1,230 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelVersion/list.json -func ExampleModelVersionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewModelVersionsClient().NewListPager("test-rg", "my-aml-workspace", "string", &armmachinelearning.ModelVersionsClientListOptions{Skip: nil, - OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Version: to.Ptr("string"), - Description: to.Ptr("string"), - Offset: to.Ptr[int32](1), - Tags: to.Ptr("string"), - Properties: to.Ptr("string"), - Feed: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.ModelVersionResourceArmPaginatedResult = armmachinelearning.ModelVersionResourceArmPaginatedResult{ - // Value: []*armmachinelearning.ModelVersion{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ModelVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // Flavors: map[string]*armmachinelearning.FlavorData{ - // "string": &armmachinelearning.FlavorData{ - // Data: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // }, - // }, - // ModelType: to.Ptr("CustomModel"), - // ModelURI: to.Ptr("string"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelVersion/delete.json -func ExampleModelVersionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", 
err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewModelVersionsClient().Delete(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelVersion/get.json -func ExampleModelVersionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewModelVersionsClient().Get(ctx, "test-rg", "my-aml-workspace", "string", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ModelVersion = armmachinelearning.ModelVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ModelVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // Flavors: map[string]*armmachinelearning.FlavorData{ - // "string": &armmachinelearning.FlavorData{ - // Data: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // }, - // }, - // ModelType: to.Ptr("CustomModel"), - // ModelURI: to.Ptr("string"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ModelVersion/createOrUpdate.json -func ExampleModelVersionsClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewModelVersionsClient().CreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", "string", armmachinelearning.ModelVersion{ - Properties: 
&armmachinelearning.ModelVersionProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - IsAnonymous: to.Ptr(false), - Flavors: map[string]*armmachinelearning.FlavorData{ - "string": { - Data: map[string]*string{ - "string": to.Ptr("string"), - }, - }, - }, - ModelType: to.Ptr("CustomModel"), - ModelURI: to.Ptr("string"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ModelVersion = armmachinelearning.ModelVersion{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Properties: &armmachinelearning.ModelVersionProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // IsAnonymous: to.Ptr(false), - // Flavors: map[string]*armmachinelearning.FlavorData{ - // "string": &armmachinelearning.FlavorData{ - // Data: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // }, - // }, - // ModelType: to.Ptr("CustomModel"), - // ModelURI: to.Ptr("string"), - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client.go index 7cebb1f4793b..e5549efcf15b 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client.go @@ -48,7 +48,7 @@ func NewOnlineDeploymentsClient(subscriptionID string, credential azcore.TokenCr // BeginCreateOrUpdate - Create or update Inference Endpoint Deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name. 
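The modelversions_client.go changes above add a long-running BeginPackage operation and a "stage" filter on NewListPager, alongside the api-version bump to 2023-06-01-preview. Below is a minimal usage sketch, not part of this diff: the /v4 module path for this beta, the subscription ID, resource names, the "Production" stage value, and the empty PackageRequest payload are all assumptions, and PackageRequest must be populated per the module's models before the call will succeed.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// List only model versions in a given stage; the Stage filter is new in this api-version.
	pager := clientFactory.NewModelVersionsClient().NewListPager("test-rg", "my-aml-workspace", "my-model",
		&armmachinelearning.ModelVersionsClientListOptions{Stage: to.Ptr("Production")})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		_ = page
	}
	// Package a model version; BeginPackage is a long-running operation whose final state is
	// resolved via the Location header, as configured in the generated client above.
	poller, err := clientFactory.NewModelVersionsClient().BeginPackage(ctx, "test-rg", "my-aml-workspace", "my-model", "1",
		armmachinelearning.PackageRequest{ /* fields omitted; see the module's PackageRequest model */ }, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	if _, err = poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}
}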
@@ -62,7 +62,9 @@ func (client *OnlineDeploymentsClient) BeginCreateOrUpdate(ctx context.Context, if err != nil { return nil, err } - return runtime.NewPoller[OnlineDeploymentsClientCreateOrUpdateResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[OnlineDeploymentsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) } else { return runtime.NewPollerFromResumeToken[OnlineDeploymentsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -71,7 +73,7 @@ func (client *OnlineDeploymentsClient) BeginCreateOrUpdate(ctx context.Context, // CreateOrUpdate - Create or update Inference Endpoint Deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineDeploymentsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, deploymentName string, body OnlineDeployment, options *OnlineDeploymentsClientBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, deploymentName, body, options) if err != nil { @@ -115,7 +117,7 @@ func (client *OnlineDeploymentsClient) createOrUpdateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -124,7 +126,7 @@ func (client *OnlineDeploymentsClient) createOrUpdateCreateRequest(ctx context.C // BeginDelete - Delete Inference Endpoint Deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name. @@ -137,7 +139,9 @@ func (client *OnlineDeploymentsClient) BeginDelete(ctx context.Context, resource if err != nil { return nil, err } - return runtime.NewPoller[OnlineDeploymentsClientDeleteResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[OnlineDeploymentsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[OnlineDeploymentsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -146,7 +150,7 @@ func (client *OnlineDeploymentsClient) BeginDelete(ctx context.Context, resource // Delete - Delete Inference Endpoint Deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineDeploymentsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, deploymentName string, options *OnlineDeploymentsClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, deploymentName, options) if err != nil { @@ -190,7 +194,7 @@ func (client *OnlineDeploymentsClient) deleteCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -199,7 +203,7 @@ func (client *OnlineDeploymentsClient) deleteCreateRequest(ctx context.Context, // Get - Get Inference Deployment Deployment. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name. @@ -248,7 +252,7 @@ func (client *OnlineDeploymentsClient) getCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -266,7 +270,7 @@ func (client *OnlineDeploymentsClient) getHandleResponse(resp *http.Response) (O // GetLogs - Polls an Endpoint operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name. @@ -317,7 +321,7 @@ func (client *OnlineDeploymentsClient) getLogsCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -334,7 +338,7 @@ func (client *OnlineDeploymentsClient) getLogsHandleResponse(resp *http.Response // NewListPager - List Inference Endpoint Deployments. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name. @@ -392,7 +396,7 @@ func (client *OnlineDeploymentsClient) listCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.OrderBy != nil { reqQP.Set("$orderBy", *options.OrderBy) } @@ -418,7 +422,7 @@ func (client *OnlineDeploymentsClient) listHandleResponse(resp *http.Response) ( // NewListSKUsPager - List Inference Endpoint Deployment Skus. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Inference endpoint name. @@ -481,7 +485,7 @@ func (client *OnlineDeploymentsClient) listSKUsCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Count != nil { reqQP.Set("count", strconv.FormatInt(int64(*options.Count), 10)) } @@ -505,7 +509,7 @@ func (client *OnlineDeploymentsClient) listSKUsHandleResponse(resp *http.Respons // BeginUpdate - Update Online Deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. @@ -528,7 +532,7 @@ func (client *OnlineDeploymentsClient) BeginUpdate(ctx context.Context, resource // Update - Update Online Deployment (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineDeploymentsClient) update(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, deploymentName string, body PartialMinimalTrackedResourceWithSKU, options *OnlineDeploymentsClientBeginUpdateOptions) (*http.Response, error) { req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, deploymentName, body, options) if err != nil { @@ -572,7 +576,7 @@ func (client *OnlineDeploymentsClient) updateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client_example_test.go deleted file mode 100644 index 382daf130b86..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/onlinedeployments_client_example_test.go +++ /dev/null @@ -1,1017 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
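The onlinedeployments_client.go hunks above pin the long-running operations to explicit final states (FinalStateViaOriginalURI for create-or-update, FinalStateViaLocation for delete) and bump the api-version; caller-side polling is unchanged. A short hedged sketch, reusing the clientFactory and ctx from the sketch earlier and hypothetical resource names, of suspending and resuming the delete poller through its resume token:

	poller, err := clientFactory.NewOnlineDeploymentsClient().BeginDelete(ctx, "test-rg", "my-aml-workspace", "testEndpoint", "testDeployment", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	// Persist the token so the LRO can be rehydrated later, e.g. after a process restart.
	token, err := poller.ResumeToken()
	if err != nil {
		log.Fatalf("failed to get a resume token: %v", err)
	}
	resumed, err := clientFactory.NewOnlineDeploymentsClient().BeginDelete(ctx, "test-rg", "my-aml-workspace", "testEndpoint", "testDeployment",
		&armmachinelearning.OnlineDeploymentsClientBeginDeleteOptions{ResumeToken: token})
	if err != nil {
		log.Fatalf("failed to resume the poller: %v", err)
	}
	if _, err = resumed.PollUntilDone(ctx, nil); err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}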
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/list.json -func ExampleOnlineDeploymentsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewOnlineDeploymentsClient().NewListPager("test-rg", "my-aml-workspace", "testEndpointName", &armmachinelearning.OnlineDeploymentsClientListOptions{OrderBy: to.Ptr("string"), - Top: to.Ptr[int32](1), - Skip: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.OnlineDeploymentTrackedResourceArmPaginatedResult = armmachinelearning.OnlineDeploymentTrackedResourceArmPaginatedResult{ - // Value: []*armmachinelearning.OnlineDeployment{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.KubernetesOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - 
// EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeKubernetes), - // InstanceType: to.Ptr("string"), - // LivenessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // RequestSettings: &armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // ContainerResourceRequirements: &armmachinelearning.ContainerResourceRequirements{ - // ContainerResourceLimits: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // ContainerResourceRequests: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/delete.json -func ExampleOnlineDeploymentsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineDeploymentsClient().BeginDelete(ctx, "testrg123", "workspace123", "testEndpoint", "testDeployment", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/get.json -func ExampleOnlineDeploymentsClient_Get_getKubernetesOnlineDeployment() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewOnlineDeploymentsClient().Get(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. 
- _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineDeployment = armmachinelearning.OnlineDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.KubernetesOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - // EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeKubernetes), - // InstanceType: to.Ptr("string"), - // LivenessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // RequestSettings: &armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // ContainerResourceRequirements: &armmachinelearning.ContainerResourceRequirements{ - // ContainerResourceLimits: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // ContainerResourceRequests: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // 
} -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/get.json -func ExampleOnlineDeploymentsClient_Get_getManagedOnlineDeployment() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewOnlineDeploymentsClient().Get(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineDeployment = armmachinelearning.OnlineDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.ManagedOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - // EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeManaged), - // InstanceType: to.Ptr("string"), - // LivenessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // 
ReadinessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](30), - // InitialDelay: to.Ptr("PT1S"), - // Period: to.Ptr("PT10S"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT2S"), - // }, - // RequestSettings: &armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/update.json -func ExampleOnlineDeploymentsClient_BeginUpdate_updateKubernetesOnlineDeployment() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineDeploymentsClient().BeginUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", armmachinelearning.PartialMinimalTrackedResourceWithSKU{ - Tags: map[string]*string{}, - SKU: &armmachinelearning.PartialSKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.OnlineDeployment = armmachinelearning.OnlineDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.KubernetesOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - // EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeKubernetes), - // InstanceType: to.Ptr("string"), - // LivenessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // RequestSettings: &armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // ContainerResourceRequirements: &armmachinelearning.ContainerResourceRequirements{ - // ContainerResourceLimits: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // ContainerResourceRequests: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/update.json -func ExampleOnlineDeploymentsClient_BeginUpdate_updateManagedOnlineDeployment() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineDeploymentsClient().BeginUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", armmachinelearning.PartialMinimalTrackedResourceWithSKU{ - Tags: map[string]*string{}, - SKU: &armmachinelearning.PartialSKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineDeployment = armmachinelearning.OnlineDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.ManagedOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - // EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeManaged), - // InstanceType: to.Ptr("string"), - // LivenessProbe: 
&armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // ReadinessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](30), - // InitialDelay: to.Ptr("PT1S"), - // Period: to.Ptr("PT10S"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT2S"), - // }, - // RequestSettings: &armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/createOrUpdate.json -func ExampleOnlineDeploymentsClient_BeginCreateOrUpdate_createOrUpdateKubernetesOnlineDeployment() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineDeploymentsClient().BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", armmachinelearning.OnlineDeployment{ - Location: to.Ptr("string"), - Tags: map[string]*string{}, - Identity: &armmachinelearning.ManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - "string": {}, - }, - }, - Kind: to.Ptr("string"), - Properties: &armmachinelearning.KubernetesOnlineDeployment{ - Description: to.Ptr("string"), - CodeConfiguration: &armmachinelearning.CodeConfiguration{ - CodeID: to.Ptr("string"), - ScoringScript: to.Ptr("string"), - }, - EnvironmentID: to.Ptr("string"), - EnvironmentVariables: map[string]*string{ - "string": to.Ptr("string"), - }, - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - AppInsightsEnabled: to.Ptr(false), - EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeKubernetes), - InstanceType: to.Ptr("string"), - LivenessProbe: &armmachinelearning.ProbeSettings{ - FailureThreshold: to.Ptr[int32](1), - InitialDelay: to.Ptr("PT5M"), - Period: to.Ptr("PT5M"), - SuccessThreshold: to.Ptr[int32](1), - Timeout: to.Ptr("PT5M"), - }, - Model: to.Ptr("string"), - ModelMountPath: to.Ptr("string"), - RequestSettings: &armmachinelearning.OnlineRequestSettings{ - MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - MaxQueueWait: to.Ptr("PT5M"), - RequestTimeout: to.Ptr("PT5M"), - }, - ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - ScaleType: 
to.Ptr(armmachinelearning.ScaleTypeDefault), - }, - ContainerResourceRequirements: &armmachinelearning.ContainerResourceRequirements{ - ContainerResourceLimits: &armmachinelearning.ContainerResourceSettings{ - CPU: to.Ptr("\"1\""), - Gpu: to.Ptr("\"1\""), - Memory: to.Ptr("\"2Gi\""), - }, - ContainerResourceRequests: &armmachinelearning.ContainerResourceSettings{ - CPU: to.Ptr("\"1\""), - Gpu: to.Ptr("\"1\""), - Memory: to.Ptr("\"2Gi\""), - }, - }, - }, - SKU: &armmachinelearning.SKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineDeployment = armmachinelearning.OnlineDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.KubernetesOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - // EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeKubernetes), - // InstanceType: to.Ptr("string"), - // LivenessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // RequestSettings: 
&armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // ContainerResourceRequirements: &armmachinelearning.ContainerResourceRequirements{ - // ContainerResourceLimits: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // ContainerResourceRequests: &armmachinelearning.ContainerResourceSettings{ - // CPU: to.Ptr("\"1\""), - // Gpu: to.Ptr("\"1\""), - // Memory: to.Ptr("\"2Gi\""), - // }, - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/createOrUpdate.json -func ExampleOnlineDeploymentsClient_BeginCreateOrUpdate_createOrUpdateManagedOnlineDeployment() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineDeploymentsClient().BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", armmachinelearning.OnlineDeployment{ - Location: to.Ptr("string"), - Tags: map[string]*string{}, - Identity: &armmachinelearning.ManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - "string": {}, - }, - }, - Kind: to.Ptr("string"), - Properties: &armmachinelearning.ManagedOnlineDeployment{ - Description: to.Ptr("string"), - CodeConfiguration: &armmachinelearning.CodeConfiguration{ - CodeID: to.Ptr("string"), - ScoringScript: to.Ptr("string"), - }, - EnvironmentID: to.Ptr("string"), - EnvironmentVariables: map[string]*string{ - "string": to.Ptr("string"), - }, - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - AppInsightsEnabled: to.Ptr(false), - EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeManaged), - InstanceType: to.Ptr("string"), - LivenessProbe: &armmachinelearning.ProbeSettings{ - FailureThreshold: to.Ptr[int32](1), - InitialDelay: to.Ptr("PT5M"), - Period: to.Ptr("PT5M"), - SuccessThreshold: to.Ptr[int32](1), - Timeout: to.Ptr("PT5M"), - }, - Model: to.Ptr("string"), - ModelMountPath: to.Ptr("string"), - ReadinessProbe: &armmachinelearning.ProbeSettings{ - FailureThreshold: to.Ptr[int32](30), - InitialDelay: to.Ptr("PT1S"), - Period: to.Ptr("PT10S"), - SuccessThreshold: to.Ptr[int32](1), - Timeout: to.Ptr("PT2S"), - }, - RequestSettings: &armmachinelearning.OnlineRequestSettings{ - MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - MaxQueueWait: to.Ptr("PT5M"), - RequestTimeout: to.Ptr("PT5M"), - }, - ScaleSettings: &armmachinelearning.DefaultScaleSettings{ - ScaleType: 
to.Ptr(armmachinelearning.ScaleTypeDefault), - }, - }, - SKU: &armmachinelearning.SKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineDeployment = armmachinelearning.OnlineDeployment{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.ManagedOnlineDeployment{ - // Description: to.Ptr("string"), - // CodeConfiguration: &armmachinelearning.CodeConfiguration{ - // CodeID: to.Ptr("string"), - // ScoringScript: to.Ptr("string"), - // }, - // EnvironmentID: to.Ptr("string"), - // EnvironmentVariables: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // AppInsightsEnabled: to.Ptr(false), - // EndpointComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeManaged), - // InstanceType: to.Ptr("string"), - // LivenessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](1), - // InitialDelay: to.Ptr("PT5M"), - // Period: to.Ptr("PT5M"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT5M"), - // }, - // Model: to.Ptr("string"), - // ModelMountPath: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.DeploymentProvisioningStateSucceeded), - // ReadinessProbe: &armmachinelearning.ProbeSettings{ - // FailureThreshold: to.Ptr[int32](30), - // InitialDelay: to.Ptr("PT1S"), - // Period: to.Ptr("PT10S"), - // SuccessThreshold: to.Ptr[int32](1), - // Timeout: to.Ptr("PT2S"), - // }, - // RequestSettings: &armmachinelearning.OnlineRequestSettings{ - // MaxConcurrentRequestsPerInstance: to.Ptr[int32](1), - // MaxQueueWait: to.Ptr("PT5M"), - // RequestTimeout: to.Ptr("PT5M"), - // }, - // ScaleSettings: 
&armmachinelearning.DefaultScaleSettings{ - // ScaleType: to.Ptr(armmachinelearning.ScaleTypeDefault), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/getLogs.json -func ExampleOnlineDeploymentsClient_GetLogs() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewOnlineDeploymentsClient().GetLogs(ctx, "testrg123", "workspace123", "testEndpoint", "testDeployment", armmachinelearning.DeploymentLogsRequest{ - ContainerType: to.Ptr(armmachinelearning.ContainerTypeStorageInitializer), - Tail: to.Ptr[int32](0), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.DeploymentLogs = armmachinelearning.DeploymentLogs{ - // Content: to.Ptr("string"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/listSkus.json -func ExampleOnlineDeploymentsClient_NewListSKUsPager_listKubernetesOnlineDeploymentSkus() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewOnlineDeploymentsClient().NewListSKUsPager("test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", &armmachinelearning.OnlineDeploymentsClientListSKUsOptions{Count: to.Ptr[int32](1), - Skip: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.SKUResourceArmPaginatedResult = armmachinelearning.SKUResourceArmPaginatedResult{ - // Value: []*armmachinelearning.SKUResource{ - // { - // Capacity: &armmachinelearning.SKUCapacity{ - // Default: to.Ptr[int32](1), - // Maximum: to.Ptr[int32](1), - // Minimum: to.Ptr[int32](1), - // ScaleType: to.Ptr(armmachinelearning.SKUScaleTypeAutomatic), - // }, - // ResourceType: to.Ptr("Microsoft.MachineLearning.Services/endpoints/deployments"), - // SKU: &armmachinelearning.SKUSetting{ - // Name: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/listSkus.json -func ExampleOnlineDeploymentsClient_NewListSKUsPager_listManagedOnlineDeploymentSkus() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewOnlineDeploymentsClient().NewListSKUsPager("test-rg", "my-aml-workspace", "testEndpointName", "testDeploymentName", &armmachinelearning.OnlineDeploymentsClientListSKUsOptions{Count: to.Ptr[int32](1), - Skip: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.SKUResourceArmPaginatedResult = armmachinelearning.SKUResourceArmPaginatedResult{ - // Value: []*armmachinelearning.SKUResource{ - // { - // Capacity: &armmachinelearning.SKUCapacity{ - // Default: to.Ptr[int32](1), - // Maximum: to.Ptr[int32](1), - // Minimum: to.Ptr[int32](1), - // ScaleType: to.Ptr(armmachinelearning.SKUScaleTypeAutomatic), - // }, - // ResourceType: to.Ptr("Microsoft.MachineLearning.Services/endpoints/deployments"), - // SKU: &armmachinelearning.SKUSetting{ - // Name: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // }}, - // } - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client.go index 87bb16675658..b3f8a310d0ef 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client.go @@ -48,7 +48,7 @@ func NewOnlineEndpointsClient(subscriptionID string, credential azcore.TokenCred // BeginCreateOrUpdate - Create or update Online Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. 
@@ -61,7 +61,9 @@ func (client *OnlineEndpointsClient) BeginCreateOrUpdate(ctx context.Context, re if err != nil { return nil, err } - return runtime.NewPoller[OnlineEndpointsClientCreateOrUpdateResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[OnlineEndpointsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) } else { return runtime.NewPollerFromResumeToken[OnlineEndpointsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -70,7 +72,7 @@ func (client *OnlineEndpointsClient) BeginCreateOrUpdate(ctx context.Context, re // CreateOrUpdate - Create or update Online Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineEndpointsClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, body OnlineEndpoint, options *OnlineEndpointsClientBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, body, options) if err != nil { @@ -110,7 +112,7 @@ func (client *OnlineEndpointsClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -119,7 +121,7 @@ func (client *OnlineEndpointsClient) createOrUpdateCreateRequest(ctx context.Con // BeginDelete - Delete Online Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. @@ -131,7 +133,9 @@ func (client *OnlineEndpointsClient) BeginDelete(ctx context.Context, resourceGr if err != nil { return nil, err } - return runtime.NewPoller[OnlineEndpointsClientDeleteResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[OnlineEndpointsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[OnlineEndpointsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -140,7 +144,7 @@ func (client *OnlineEndpointsClient) BeginDelete(ctx context.Context, resourceGr // Delete - Delete Online Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. 
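// Editorial sketch (not generated code): the poller hunks above only change how the generated
// BeginCreateOrUpdate and BeginDelete LROs resolve their terminal state (FinalStateViaOriginalURI
// and FinalStateViaLocation); callers still start the operation and wait with PollUntilDone as
// before. A minimal, self-contained example with placeholder resource names, assuming the module
// path used by the example files removed elsewhere in this change:
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Start the long-running delete; the FinalStateVia behaviour is handled inside the client.
	poller, err := clientFactory.NewOnlineEndpointsClient().BeginDelete(context.Background(), "test-rg", "my-aml-workspace", "testEndpointName", nil)
	if err != nil {
		log.Fatalf("failed to start the request: %v", err)
	}
	// Block until the service reports a terminal state.
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}
}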
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineEndpointsClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, options *OnlineEndpointsClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, options) if err != nil { @@ -180,7 +184,7 @@ func (client *OnlineEndpointsClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -189,7 +193,7 @@ func (client *OnlineEndpointsClient) deleteCreateRequest(ctx context.Context, re // Get - Get Online Endpoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. @@ -233,7 +237,7 @@ func (client *OnlineEndpointsClient) getCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -248,10 +252,10 @@ func (client *OnlineEndpointsClient) getHandleResponse(resp *http.Response) (Onl return result, nil } -// GetToken - Retrieve a valid AAD token for an Endpoint using AMLToken-based authentication. +// GetToken - Retrieve a valid AML token for an Endpoint using AMLToken-based authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. @@ -296,7 +300,7 @@ func (client *OnlineEndpointsClient) getTokenCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -313,7 +317,7 @@ func (client *OnlineEndpointsClient) getTokenHandleResponse(resp *http.Response) // NewListPager - List Online Endpoints. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. 
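// Editorial sketch (not generated code): GetToken above returns a short-lived AML token for an
// endpoint that uses AMLToken-based auth. A hedged example of fetching it, reusing the call shape
// of the example file removed further below; the client is assumed to come from the same
// NewClientFactory shown elsewhere in this change, and the resource names are placeholders:
func fetchEndpointToken(ctx context.Context, client *armmachinelearning.OnlineEndpointsClient) (string, error) {
	res, err := client.GetToken(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil)
	if err != nil {
		return "", err
	}
	// EndpointAuthToken carries the bearer token plus expiry and refresh-after fields.
	if res.AccessToken == nil {
		return "", nil
	}
	return *res.AccessToken, nil
}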
// - options - OnlineEndpointsClientListOptions contains the optional parameters for the OnlineEndpointsClient.NewListPager @@ -366,7 +370,7 @@ func (client *OnlineEndpointsClient) listCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Name != nil { reqQP.Set("name", *options.Name) } @@ -405,7 +409,7 @@ func (client *OnlineEndpointsClient) listHandleResponse(resp *http.Response) (On // ListKeys - List EndpointAuthKeys for an Endpoint using Key-based authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. @@ -450,7 +454,7 @@ func (client *OnlineEndpointsClient) listKeysCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -468,7 +472,7 @@ func (client *OnlineEndpointsClient) listKeysHandleResponse(resp *http.Response) // BeginRegenerateKeys - Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - endpointName - Online Endpoint name. @@ -492,7 +496,7 @@ func (client *OnlineEndpointsClient) BeginRegenerateKeys(ctx context.Context, re // RegenerateKeys - Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineEndpointsClient) regenerateKeys(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, body RegenerateEndpointKeysRequest, options *OnlineEndpointsClientBeginRegenerateKeysOptions) (*http.Response, error) { req, err := client.regenerateKeysCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, body, options) if err != nil { @@ -532,7 +536,7 @@ func (client *OnlineEndpointsClient) regenerateKeysCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -541,7 +545,7 @@ func (client *OnlineEndpointsClient) regenerateKeysCreateRequest(ctx context.Con // BeginUpdate - Update Online Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. 
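// Editorial sketch (not generated code): ListKeys and BeginRegenerateKeys above manage key-based
// endpoint auth. A hedged example that rotates the primary key and reads the keys back, mirroring
// the call shapes of the example file removed further below; it assumes the context, to, and
// armmachinelearning imports from that file, and the resource names are placeholders:
func rotatePrimaryKey(ctx context.Context, client *armmachinelearning.OnlineEndpointsClient) (*string, error) {
	poller, err := client.BeginRegenerateKeys(ctx, "test-rg", "my-aml-workspace", "testEndpointName",
		armmachinelearning.RegenerateEndpointKeysRequest{
			KeyType: to.Ptr(armmachinelearning.KeyTypePrimary),
		}, nil)
	if err != nil {
		return nil, err
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		return nil, err
	}
	keys, err := client.ListKeys(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil)
	if err != nil {
		return nil, err
	}
	// EndpointAuthKeys exposes PrimaryKey and SecondaryKey; PrimaryKey now holds the new value.
	return keys.PrimaryKey, nil
}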
// - endpointName - Online Endpoint name. @@ -563,7 +567,7 @@ func (client *OnlineEndpointsClient) BeginUpdate(ctx context.Context, resourceGr // Update - Update Online Endpoint (asynchronous). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *OnlineEndpointsClient) update(ctx context.Context, resourceGroupName string, workspaceName string, endpointName string, body PartialMinimalTrackedResourceWithIdentity, options *OnlineEndpointsClientBeginUpdateOptions) (*http.Response, error) { req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, endpointName, body, options) if err != nil { @@ -603,7 +607,7 @@ func (client *OnlineEndpointsClient) updateCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client_example_test.go deleted file mode 100644 index e4acc4d04e84..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/onlineendpoints_client_example_test.go +++ /dev/null @@ -1,449 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/list.json -func ExampleOnlineEndpointsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewOnlineEndpointsClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.OnlineEndpointsClientListOptions{Name: to.Ptr("string"), - Count: to.Ptr[int32](1), - ComputeType: to.Ptr(armmachinelearning.EndpointComputeTypeManaged), - Skip: nil, - Tags: to.Ptr("string"), - Properties: to.Ptr("string"), - OrderBy: to.Ptr(armmachinelearning.OrderStringCreatedAtDesc), - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. 
- _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.OnlineEndpointTrackedResourceArmPaginatedResult = armmachinelearning.OnlineEndpointTrackedResourceArmPaginatedResult{ - // Value: []*armmachinelearning.OnlineEndpoint{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.OnlineEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Compute: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.EndpointProvisioningStateSucceeded), - // Traffic: map[string]*int32{ - // "string": to.Ptr[int32](1), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/delete.json -func ExampleOnlineEndpointsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineEndpointsClient().BeginDelete(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/get.json -func ExampleOnlineEndpointsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewOnlineEndpointsClient().Get(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineEndpoint = armmachinelearning.OnlineEndpoint{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.OnlineEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Compute: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.EndpointProvisioningStateSucceeded), - // Traffic: map[string]*int32{ - // "string": to.Ptr[int32](1), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/update.json -func ExampleOnlineEndpointsClient_BeginUpdate() { - cred, err := 
azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineEndpointsClient().BeginUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", armmachinelearning.PartialMinimalTrackedResourceWithIdentity{ - Tags: map[string]*string{}, - Identity: &armmachinelearning.PartialManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]any{ - "string": map[string]any{}, - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.OnlineEndpoint = armmachinelearning.OnlineEndpoint{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.OnlineEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Compute: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.EndpointProvisioningStateSucceeded), - // Traffic: map[string]*int32{ - // "string": to.Ptr[int32](1), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/createOrUpdate.json -func ExampleOnlineEndpointsClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineEndpointsClient().BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "testEndpointName", armmachinelearning.OnlineEndpoint{ - Location: to.Ptr("string"), - Tags: map[string]*string{}, - Identity: &armmachinelearning.ManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - "string": {}, - }, - }, - Kind: to.Ptr("string"), - Properties: &armmachinelearning.OnlineEndpointProperties{ - Description: to.Ptr("string"), - AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Compute: to.Ptr("string"), - Traffic: map[string]*int32{ - "string": to.Ptr[int32](1), - }, - }, - SKU: &armmachinelearning.SKU{ - Name: to.Ptr("string"), - Capacity: to.Ptr[int32](1), - Family: to.Ptr("string"), - Size: to.Ptr("string"), - Tier: to.Ptr(armmachinelearning.SKUTierFree), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.OnlineEndpoint = armmachinelearning.OnlineEndpoint{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeUser), - // }, - // Location: to.Ptr("string"), - // Tags: map[string]*string{ - // }, - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "string": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Kind: to.Ptr("string"), - // Properties: &armmachinelearning.OnlineEndpointProperties{ - // Description: to.Ptr("string"), - // AuthMode: to.Ptr(armmachinelearning.EndpointAuthModeAMLToken), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // ScoringURI: to.Ptr("https://www.contoso.com/example"), - // SwaggerURI: to.Ptr("https://www.contoso.com/example"), - // Compute: to.Ptr("string"), - // ProvisioningState: to.Ptr(armmachinelearning.EndpointProvisioningStateSucceeded), - // Traffic: map[string]*int32{ - // "string": to.Ptr[int32](1), - // }, - // }, - // SKU: &armmachinelearning.SKU{ - // Name: to.Ptr("string"), - // Capacity: to.Ptr[int32](1), - // Family: to.Ptr("string"), - // Size: to.Ptr("string"), - // Tier: to.Ptr(armmachinelearning.SKUTierFree), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/listKeys.json -func ExampleOnlineEndpointsClient_ListKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewOnlineEndpointsClient().ListKeys(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.EndpointAuthKeys = armmachinelearning.EndpointAuthKeys{ - // PrimaryKey: to.Ptr("string"), - // SecondaryKey: to.Ptr("string"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/regenerateKeys.json -func ExampleOnlineEndpointsClient_BeginRegenerateKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewOnlineEndpointsClient().BeginRegenerateKeys(ctx, "test-rg", "my-aml-workspace", "testEndpointName", armmachinelearning.RegenerateEndpointKeysRequest{ - KeyType: to.Ptr(armmachinelearning.KeyTypePrimary), - KeyValue: to.Ptr("string"), - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/OnlineEndpoint/getToken.json -func ExampleOnlineEndpointsClient_GetToken() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewOnlineEndpointsClient().GetToken(ctx, "test-rg", "my-aml-workspace", "testEndpointName", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.EndpointAuthToken = armmachinelearning.EndpointAuthToken{ - // AccessToken: to.Ptr("string"), - // ExpiryTimeUTC: to.Ptr[int64](1), - // RefreshAfterTimeUTC: to.Ptr[int64](1), - // TokenType: to.Ptr("string"), - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client.go index 72feb7c70761..8ab13243ee9d 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client.go @@ -38,9 +38,9 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO return client, nil } -// NewListPager - Lists all of the available Azure Machine Learning Workspaces REST API operations. +// NewListPager - Lists all of the available Azure Machine Learning Workspaces REST API operations // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. 
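// Editorial sketch (not generated code): a minimal way to drive OperationsClient.NewListPager,
// mirroring the example file removed below; the loop shape is the standard azcore pager pattern:
func listOperations(ctx context.Context, client *armmachinelearning.OperationsClient) error {
	pager := client.NewListPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, op := range page.Value {
			_ = op // each entry describes one Microsoft.MachineLearningServices REST operation
		}
	}
	return nil
}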
func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -72,7 +72,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client_example_test.go deleted file mode 100644 index 725beda489e9..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/operations_client_example_test.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/operationsList.json -func ExampleOperationsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewOperationsClient().NewListPager(nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.AmlOperationListResult = armmachinelearning.AmlOperationListResult{ - // Value: []*armmachinelearning.AmlOperation{ - // { - // Name: to.Ptr("Microsoft.MachineLearningServices/workspaces/write"), - // Display: &armmachinelearning.AmlOperationDisplay{ - // Operation: to.Ptr("Create/Update Machine Learning workspaces"), - // Provider: to.Ptr("Microsoft MachineLearningServices"), - // Resource: to.Ptr("workspaces"), - // }, - // }, - // { - // Name: to.Ptr("Microsoft.MachineLearningServices/workspaces/delete"), - // Display: &armmachinelearning.AmlOperationDisplay{ - // Operation: to.Ptr("Delete Machine Learning workspaces"), - // Provider: to.Ptr("Microsoft MachineLearningServices"), - // Resource: to.Ptr("workspaces"), - // }, - // }, - // { - // Name: to.Ptr("Microsoft.MachineLearningServices/workspaces/listkeys/action"), - // Display: &armmachinelearning.AmlOperationDisplay{ - // Operation: to.Ptr("List workspace Keys"), - // Provider: to.Ptr("Microsoft MachineLearningServices"), - // Resource: to.Ptr("workspaces"), - // }, - // }}, - // } - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/polymorphic_helpers.go b/sdk/resourcemanager/machinelearning/armmachinelearning/polymorphic_helpers.go index 0514343147e4..e64673b7ff4a 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/polymorphic_helpers.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/polymorphic_helpers.go @@ -69,6 +69,42 @@ func unmarshalAutoMLVerticalClassification(rawMsg json.RawMessage) (AutoMLVertic return b, json.Unmarshal(rawMsg, b) } +func unmarshalBaseEnvironmentSourceClassification(rawMsg json.RawMessage) (BaseEnvironmentSourceClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b BaseEnvironmentSourceClassification + switch m["baseEnvironmentSourceType"] { + case string(BaseEnvironmentSourceTypeEnvironmentAsset): + b = &BaseEnvironmentID{} + default: + b = &BaseEnvironmentSource{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalBatchDeploymentConfigurationClassification(rawMsg json.RawMessage) (BatchDeploymentConfigurationClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b BatchDeploymentConfigurationClassification + switch m["deploymentConfigurationType"] { + case string(BatchDeploymentConfigurationTypePipelineComponent): + b = &BatchPipelineComponentDeploymentConfiguration{} + default: + b = &BatchDeploymentConfiguration{} + } + return b, json.Unmarshal(rawMsg, b) +} + func unmarshalComputeClassification(rawMsg json.RawMessage) (ComputeClassification, error) { if rawMsg == nil { return nil, nil @@ -127,6 +163,104 @@ func unmarshalComputeSecretsClassification(rawMsg json.RawMessage) (ComputeSecre return b, json.Unmarshal(rawMsg, b) } +func unmarshalDataDriftMetricThresholdBaseClassification(rawMsg json.RawMessage) (DataDriftMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b DataDriftMetricThresholdBaseClassification + switch m["dataType"] { + case string(MonitoringFeatureDataTypeCategorical): + b = &CategoricalDataDriftMetricThreshold{} + case string(MonitoringFeatureDataTypeNumerical): + b = &NumericalDataDriftMetricThreshold{} + default: + b = 
&DataDriftMetricThresholdBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalDataDriftMetricThresholdBaseClassificationArray(rawMsg json.RawMessage) ([]DataDriftMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]DataDriftMetricThresholdBaseClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalDataDriftMetricThresholdBaseClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +func unmarshalDataImportSourceClassification(rawMsg json.RawMessage) (DataImportSourceClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b DataImportSourceClassification + switch m["sourceType"] { + case string(DataImportSourceTypeDatabase): + b = &DatabaseSource{} + case string(DataImportSourceTypeFileSystem): + b = &FileSystemSource{} + default: + b = &DataImportSource{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalDataQualityMetricThresholdBaseClassification(rawMsg json.RawMessage) (DataQualityMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b DataQualityMetricThresholdBaseClassification + switch m["dataType"] { + case string(MonitoringFeatureDataTypeCategorical): + b = &CategoricalDataQualityMetricThreshold{} + case string(MonitoringFeatureDataTypeNumerical): + b = &NumericalDataQualityMetricThreshold{} + default: + b = &DataQualityMetricThresholdBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalDataQualityMetricThresholdBaseClassificationArray(rawMsg json.RawMessage) ([]DataQualityMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]DataQualityMetricThresholdBaseClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalDataQualityMetricThresholdBaseClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + func unmarshalDataVersionBasePropertiesClassification(rawMsg json.RawMessage) (DataVersionBasePropertiesClassification, error) { if rawMsg == nil { return nil, nil @@ -163,6 +297,10 @@ func unmarshalDatastoreCredentialsClassification(rawMsg json.RawMessage) (Datast b = &AccountKeyDatastoreCredentials{} case string(CredentialsTypeCertificate): b = &CertificateDatastoreCredentials{} + case string(CredentialsTypeKerberosKeytab): + b = &KerberosKeytabCredentials{} + case string(CredentialsTypeKerberosPassword): + b = &KerberosPasswordCredentials{} case string(CredentialsTypeNone): b = &NoneDatastoreCredentials{} case string(CredentialsTypeSas): @@ -193,6 +331,10 @@ func unmarshalDatastorePropertiesClassification(rawMsg json.RawMessage) (Datasto b = &AzureDataLakeGen2Datastore{} case string(DatastoreTypeAzureFile): b = &AzureFileDatastore{} + case string(DatastoreTypeHdfs): + b = &HdfsDatastore{} + case string(DatastoreTypeOneLake): + b = &OneLakeDatastore{} default: b = &DatastoreProperties{} } @@ -213,6 +355,10 @@ func 
unmarshalDatastoreSecretsClassification(rawMsg json.RawMessage) (DatastoreS b = &AccountKeyDatastoreSecrets{} case string(SecretsTypeCertificate): b = &CertificateDatastoreSecrets{} + case string(SecretsTypeKerberosKeytab): + b = &KerberosKeytabSecrets{} + case string(SecretsTypeKerberosPassword): + b = &KerberosPasswordSecrets{} case string(SecretsTypeSas): b = &SasDatastoreSecrets{} case string(SecretsTypeServicePrincipal): @@ -237,6 +383,8 @@ func unmarshalDistributionConfigurationClassification(rawMsg json.RawMessage) (D b = &Mpi{} case string(DistributionTypePyTorch): b = &PyTorch{} + case string(DistributionTypeRay): + b = &Ray{} case string(DistributionTypeTensorFlow): b = &TensorFlow{} default: @@ -267,6 +415,28 @@ func unmarshalEarlyTerminationPolicyClassification(rawMsg json.RawMessage) (Earl return b, json.Unmarshal(rawMsg, b) } +func unmarshalExportSummaryClassification(rawMsg json.RawMessage) (ExportSummaryClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ExportSummaryClassification + switch m["format"] { + case string(ExportFormatTypeCSV): + b = &CSVExportSummary{} + case string(ExportFormatTypeCoco): + b = &CocoExportSummary{} + case string(ExportFormatTypeDataset): + b = &DatasetExportSummary{} + default: + b = &ExportSummary{} + } + return b, json.Unmarshal(rawMsg, b) +} + func unmarshalForecastHorizonClassification(rawMsg json.RawMessage) (ForecastHorizonClassification, error) { if rawMsg == nil { return nil, nil @@ -309,6 +479,30 @@ func unmarshalIdentityConfigurationClassification(rawMsg json.RawMessage) (Ident return b, json.Unmarshal(rawMsg, b) } +func unmarshalInferencingServerClassification(rawMsg json.RawMessage) (InferencingServerClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b InferencingServerClassification + switch m["serverType"] { + case string(InferencingServerTypeAzureMLBatch): + b = &AzureMLBatchInferencingServer{} + case string(InferencingServerTypeAzureMLOnline): + b = &AzureMLOnlineInferencingServer{} + case string(InferencingServerTypeCustom): + b = &CustomInferencingServer{} + case string(InferencingServerTypeTriton): + b = &TritonInferencingServer{} + default: + b = &InferencingServer{} + } + return b, json.Unmarshal(rawMsg, b) +} + func unmarshalJobBasePropertiesClassification(rawMsg json.RawMessage) (JobBasePropertiesClassification, error) { if rawMsg == nil { return nil, nil @@ -323,8 +517,12 @@ func unmarshalJobBasePropertiesClassification(rawMsg json.RawMessage) (JobBasePr b = &AutoMLJob{} case string(JobTypeCommand): b = &CommandJob{} + case string(JobTypeLabeling): + b = &LabelingJobProperties{} case string(JobTypePipeline): b = &PipelineJob{} + case string(JobTypeSpark): + b = &SparkJob{} case string(JobTypeSweep): b = &SweepJob{} default: @@ -429,6 +627,257 @@ func unmarshalJobOutputClassificationMap(rawMsg json.RawMessage) (map[string]Job return fMap, nil } +func unmarshalLabelingJobMediaPropertiesClassification(rawMsg json.RawMessage) (LabelingJobMediaPropertiesClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b LabelingJobMediaPropertiesClassification + switch m["mediaType"] { + case string(MediaTypeImage): + b = &LabelingJobImageProperties{} + case string(MediaTypeText): + 
b = &LabelingJobTextProperties{} + default: + b = &LabelingJobMediaProperties{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMLAssistConfigurationClassification(rawMsg json.RawMessage) (MLAssistConfigurationClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b MLAssistConfigurationClassification + switch m["mlAssist"] { + case string(MLAssistConfigurationTypeDisabled): + b = &MLAssistConfigurationDisabled{} + case string(MLAssistConfigurationTypeEnabled): + b = &MLAssistConfigurationEnabled{} + default: + b = &MLAssistConfiguration{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalModelPerformanceMetricThresholdBaseClassification(rawMsg json.RawMessage) (ModelPerformanceMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ModelPerformanceMetricThresholdBaseClassification + switch m["modelType"] { + case string(MonitoringModelTypeClassification): + b = &ClassificationModelPerformanceMetricThreshold{} + case string(MonitoringModelTypeRegression): + b = &RegressionModelPerformanceMetricThreshold{} + default: + b = &ModelPerformanceMetricThresholdBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitorComputeConfigurationBaseClassification(rawMsg json.RawMessage) (MonitorComputeConfigurationBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b MonitorComputeConfigurationBaseClassification + switch m["computeType"] { + case string(MonitorComputeTypeServerlessSpark): + b = &MonitorServerlessSparkCompute{} + default: + b = &MonitorComputeConfigurationBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitorComputeIdentityBaseClassification(rawMsg json.RawMessage) (MonitorComputeIdentityBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b MonitorComputeIdentityBaseClassification + switch m["computeIdentityType"] { + case string(MonitorComputeIdentityTypeAmlToken): + b = &AmlTokenComputeIdentity{} + case string(MonitorComputeIdentityTypeManagedIdentity): + b = &ManagedComputeIdentity{} + default: + b = &MonitorComputeIdentityBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitoringAlertNotificationSettingsBaseClassification(rawMsg json.RawMessage) (MonitoringAlertNotificationSettingsBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b MonitoringAlertNotificationSettingsBaseClassification + switch m["alertNotificationType"] { + case string(MonitoringAlertNotificationTypeAzureMonitor): + b = &AzMonMonitoringAlertNotificationSettings{} + case string(MonitoringAlertNotificationTypeEmail): + b = &EmailMonitoringAlertNotificationSettings{} + default: + b = &MonitoringAlertNotificationSettingsBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitoringFeatureFilterBaseClassification(rawMsg json.RawMessage) (MonitoringFeatureFilterBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { 
+ return nil, err + } + var b MonitoringFeatureFilterBaseClassification + switch m["filterType"] { + case string(MonitoringFeatureFilterTypeAllFeatures): + b = &AllFeatures{} + case string(MonitoringFeatureFilterTypeFeatureSubset): + b = &FeatureSubset{} + case string(MonitoringFeatureFilterTypeTopNByAttribution): + b = &TopNFeaturesByAttribution{} + default: + b = &MonitoringFeatureFilterBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitoringInputDataBaseClassification(rawMsg json.RawMessage) (MonitoringInputDataBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b MonitoringInputDataBaseClassification + switch m["inputDataType"] { + case string(MonitoringInputDataTypeFixed): + b = &FixedInputData{} + case string(MonitoringInputDataTypeStatic): + b = &StaticInputData{} + case string(MonitoringInputDataTypeTrailing): + b = &TrailingInputData{} + default: + b = &MonitoringInputDataBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitoringInputDataBaseClassificationArray(rawMsg json.RawMessage) ([]MonitoringInputDataBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]MonitoringInputDataBaseClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalMonitoringInputDataBaseClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +func unmarshalMonitoringInputDataBaseClassificationMap(rawMsg json.RawMessage) (map[string]MonitoringInputDataBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages map[string]json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fMap := make(map[string]MonitoringInputDataBaseClassification, len(rawMessages)) + for key, rawMessage := range rawMessages { + f, err := unmarshalMonitoringInputDataBaseClassification(rawMessage) + if err != nil { + return nil, err + } + fMap[key] = f + } + return fMap, nil +} + +func unmarshalMonitoringSignalBaseClassification(rawMsg json.RawMessage) (MonitoringSignalBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b MonitoringSignalBaseClassification + switch m["signalType"] { + case string(MonitoringSignalTypeCustom): + b = &CustomMonitoringSignal{} + case string(MonitoringSignalTypeDataDrift): + b = &DataDriftMonitoringSignal{} + case string(MonitoringSignalTypeDataQuality): + b = &DataQualityMonitoringSignal{} + case string(MonitoringSignalTypeFeatureAttributionDrift): + b = &FeatureAttributionDriftMonitoringSignal{} + case string(MonitoringSignalTypeGenerationSafetyQuality): + b = &GenerationSafetyQualityMonitoringSignal{} + case string(MonitoringSignalTypeGenerationTokenStatistics): + b = &GenerationTokenStatisticsSignal{} + case string(MonitoringSignalTypeModelPerformance): + b = &ModelPerformanceSignal{} + case string(MonitoringSignalTypePredictionDrift): + b = &PredictionDriftMonitoringSignal{} + default: + b = &MonitoringSignalBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalMonitoringSignalBaseClassificationMap(rawMsg json.RawMessage) 
(map[string]MonitoringSignalBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages map[string]json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fMap := make(map[string]MonitoringSignalBaseClassification, len(rawMessages)) + for key, rawMessage := range rawMessages { + f, err := unmarshalMonitoringSignalBaseClassification(rawMessage) + if err != nil { + return nil, err + } + fMap[key] = f + } + return fMap, nil +} + func unmarshalNCrossValidationsClassification(rawMsg json.RawMessage) (NCrossValidationsClassification, error) { if rawMsg == nil { return nil, nil @@ -449,6 +898,42 @@ func unmarshalNCrossValidationsClassification(rawMsg json.RawMessage) (NCrossVal return b, json.Unmarshal(rawMsg, b) } +func unmarshalNodesClassification(rawMsg json.RawMessage) (NodesClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b NodesClassification + switch m["nodesValueType"] { + case string(NodesValueTypeAll): + b = &AllNodes{} + default: + b = &Nodes{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalOneLakeArtifactClassification(rawMsg json.RawMessage) (OneLakeArtifactClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b OneLakeArtifactClassification + switch m["artifactType"] { + case string(OneLakeArtifactTypeLakeHouse): + b = &LakeHouseArtifact{} + default: + b = &OneLakeArtifact{} + } + return b, json.Unmarshal(rawMsg, b) +} + func unmarshalOnlineDeploymentPropertiesClassification(rawMsg json.RawMessage) (OnlineDeploymentPropertiesClassification, error) { if rawMsg == nil { return nil, nil @@ -489,6 +974,126 @@ func unmarshalOnlineScaleSettingsClassification(rawMsg json.RawMessage) (OnlineS return b, json.Unmarshal(rawMsg, b) } +func unmarshalOutboundRuleClassification(rawMsg json.RawMessage) (OutboundRuleClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b OutboundRuleClassification + switch m["type"] { + case string(RuleTypeFQDN): + b = &FqdnOutboundRule{} + case string(RuleTypePrivateEndpoint): + b = &PrivateEndpointOutboundRule{} + case string(RuleTypeServiceTag): + b = &ServiceTagOutboundRule{} + default: + b = &OutboundRule{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalOutboundRuleClassificationMap(rawMsg json.RawMessage) (map[string]OutboundRuleClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages map[string]json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fMap := make(map[string]OutboundRuleClassification, len(rawMessages)) + for key, rawMessage := range rawMessages { + f, err := unmarshalOutboundRuleClassification(rawMessage) + if err != nil { + return nil, err + } + fMap[key] = f + } + return fMap, nil +} + +func unmarshalPackageInputPathBaseClassification(rawMsg json.RawMessage) (PackageInputPathBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b PackageInputPathBaseClassification + switch m["inputPathType"] { + case string(InputPathTypePathID): + b = &PackageInputPathID{} + case 
string(InputPathTypePathVersion): + b = &PackageInputPathVersion{} + case string(InputPathTypeURL): + b = &PackageInputPathURL{} + default: + b = &PackageInputPathBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalPendingUploadCredentialDtoClassification(rawMsg json.RawMessage) (PendingUploadCredentialDtoClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b PendingUploadCredentialDtoClassification + switch m["credentialType"] { + case string(PendingUploadCredentialTypeSAS): + b = &SASCredentialDto{} + default: + b = &PendingUploadCredentialDto{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalPredictionDriftMetricThresholdBaseClassification(rawMsg json.RawMessage) (PredictionDriftMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b PredictionDriftMetricThresholdBaseClassification + switch m["dataType"] { + case string(MonitoringFeatureDataTypeCategorical): + b = &CategoricalPredictionDriftMetricThreshold{} + case string(MonitoringFeatureDataTypeNumerical): + b = &NumericalPredictionDriftMetricThreshold{} + default: + b = &PredictionDriftMetricThresholdBase{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalPredictionDriftMetricThresholdBaseClassificationArray(rawMsg json.RawMessage) ([]PredictionDriftMetricThresholdBaseClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]PredictionDriftMetricThresholdBaseClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalPredictionDriftMetricThresholdBaseClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + func unmarshalSamplingAlgorithmClassification(rawMsg json.RawMessage) (SamplingAlgorithmClassification, error) { if rawMsg == nil { return nil, nil @@ -523,6 +1128,10 @@ func unmarshalScheduleActionBaseClassification(rawMsg json.RawMessage) (Schedule switch m["actionType"] { case string(ScheduleActionTypeCreateJob): b = &JobScheduleAction{} + case string(ScheduleActionTypeCreateMonitor): + b = &CreateMonitorAction{} + case string(ScheduleActionTypeImportData): + b = &ImportDataAction{} case string(ScheduleActionTypeInvokeBatchEndpoint): b = &EndpointScheduleAction{} default: @@ -551,6 +1160,26 @@ func unmarshalSeasonalityClassification(rawMsg json.RawMessage) (SeasonalityClas return b, json.Unmarshal(rawMsg, b) } +func unmarshalSparkJobEntryClassification(rawMsg json.RawMessage) (SparkJobEntryClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b SparkJobEntryClassification + switch m["sparkJobEntryType"] { + case string(SparkJobEntryTypeSparkJobPythonEntry): + b = &SparkJobPythonEntry{} + case string(SparkJobEntryTypeSparkJobScalaEntry): + b = &SparkJobScalaEntry{} + default: + b = &SparkJobEntry{} + } + return b, json.Unmarshal(rawMsg, b) +} + func unmarshalTargetLagsClassification(rawMsg json.RawMessage) (TargetLagsClassification, error) { if rawMsg == nil { return nil, nil @@ -611,6 +1240,43 @@ func unmarshalTriggerBaseClassification(rawMsg json.RawMessage) 
(TriggerBaseClas return b, json.Unmarshal(rawMsg, b) } +func unmarshalWebhookClassification(rawMsg json.RawMessage) (WebhookClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b WebhookClassification + switch m["webhookType"] { + case string(WebhookTypeAzureDevOps): + b = &AzureDevOpsWebhook{} + default: + b = &Webhook{} + } + return b, json.Unmarshal(rawMsg, b) +} + +func unmarshalWebhookClassificationMap(rawMsg json.RawMessage) (map[string]WebhookClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages map[string]json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fMap := make(map[string]WebhookClassification, len(rawMessages)) + for key, rawMessage := range rawMessages { + f, err := unmarshalWebhookClassification(rawMessage) + if err != nil { + return nil, err + } + fMap[key] = f + } + return fMap, nil +} + func unmarshalWorkspaceConnectionPropertiesV2Classification(rawMsg json.RawMessage) (WorkspaceConnectionPropertiesV2Classification, error) { if rawMsg == nil { return nil, nil @@ -621,6 +1287,12 @@ func unmarshalWorkspaceConnectionPropertiesV2Classification(rawMsg json.RawMessa } var b WorkspaceConnectionPropertiesV2Classification switch m["authType"] { + case string(ConnectionAuthTypeAccessKey): + b = &AccessKeyAuthTypeWorkspaceConnectionProperties{} + case string(ConnectionAuthTypeAPIKey): + b = &APIKeyAuthWorkspaceConnectionProperties{} + case string(ConnectionAuthTypeCustomKeys): + b = &CustomKeysWorkspaceConnectionProperties{} case string(ConnectionAuthTypeManagedIdentity): b = &ManagedIdentityAuthTypeWorkspaceConnectionProperties{} case string(ConnectionAuthTypeNone): @@ -629,6 +1301,8 @@ func unmarshalWorkspaceConnectionPropertiesV2Classification(rawMsg json.RawMessa b = &PATAuthTypeWorkspaceConnectionProperties{} case string(ConnectionAuthTypeSAS): b = &SASAuthTypeWorkspaceConnectionProperties{} + case string(ConnectionAuthTypeServicePrincipal): + b = &ServicePrincipalAuthTypeWorkspaceConnectionProperties{} case string(ConnectionAuthTypeUsernamePassword): b = &UsernamePasswordAuthTypeWorkspaceConnectionProperties{} default: diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client.go index 98fd42bb9794..95ef7fbdab8c 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client.go @@ -44,18 +44,19 @@ func NewPrivateEndpointConnectionsClient(subscriptionID string, credential azcor return client, nil } -// CreateOrUpdate - Update the state of specified private endpoint connection associated with the workspace. +// CreateOrUpdate - Called by end-users to approve or reject a PE connection. This method must validate and forward the call +// to NRP. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. 
-// - privateEndpointConnectionName - The name of the private endpoint connection associated with the workspace -// - properties - The private endpoint connection properties. +// - privateEndpointConnectionName - NRP Private Endpoint Connection Name +// - body - PrivateEndpointConnection object // - options - PrivateEndpointConnectionsClientCreateOrUpdateOptions contains the optional parameters for the PrivateEndpointConnectionsClient.CreateOrUpdate // method. -func (client *PrivateEndpointConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string, properties PrivateEndpointConnection, options *PrivateEndpointConnectionsClientCreateOrUpdateOptions) (PrivateEndpointConnectionsClientCreateOrUpdateResponse, error) { - req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, privateEndpointConnectionName, properties, options) +func (client *PrivateEndpointConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string, body PrivateEndpointConnection, options *PrivateEndpointConnectionsClientCreateOrUpdateOptions) (PrivateEndpointConnectionsClientCreateOrUpdateResponse, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, privateEndpointConnectionName, body, options) if err != nil { return PrivateEndpointConnectionsClientCreateOrUpdateResponse{}, err } @@ -70,7 +71,7 @@ func (client *PrivateEndpointConnectionsClient) CreateOrUpdate(ctx context.Conte } // createOrUpdateCreateRequest creates the CreateOrUpdate request. -func (client *PrivateEndpointConnectionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string, properties PrivateEndpointConnection, options *PrivateEndpointConnectionsClientCreateOrUpdateOptions) (*policy.Request, error) { +func (client *PrivateEndpointConnectionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string, body PrivateEndpointConnection, options *PrivateEndpointConnectionsClientCreateOrUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") @@ -93,10 +94,10 @@ func (client *PrivateEndpointConnectionsClient) createOrUpdateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} - return req, runtime.MarshalAsJSON(req, properties) + return req, runtime.MarshalAsJSON(req, body) } // createOrUpdateHandleResponse handles the CreateOrUpdate response. @@ -108,13 +109,13 @@ func (client *PrivateEndpointConnectionsClient) createOrUpdateHandleResponse(res return result, nil } -// Delete - Deletes the specified private endpoint connection associated with the workspace. +// Delete - Called by end-users to delete a PE connection. // If the operation fails it returns an *azcore.ResponseError type. 
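Editor's note: for orientation only, here is a minimal, hypothetical sketch of calling the updated CreateOrUpdate surface shown above (the request parameter is now named `body`, and the connection state's status uses the `EndpointServiceConnectionStatus` enum). The `/v4` import path and the `EndpointServiceConnectionStatusApproved` constant are assumptions based on the new surface, not taken from this diff.

```go
// Hypothetical usage sketch (not generated code): approve a private endpoint
// connection against the 2023-06-01-preview surface.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The request body parameter is now named body; its shape is unchanged.
	_, err = clientFactory.NewPrivateEndpointConnectionsClient().CreateOrUpdate(
		context.Background(), "rg-1234", "testworkspace", "{privateEndpointConnectionName}",
		armmachinelearning.PrivateEndpointConnection{
			Properties: &armmachinelearning.PrivateEndpointConnectionProperties{
				PrivateLinkServiceConnectionState: &armmachinelearning.PrivateLinkServiceConnectionState{
					Description: to.Ptr("Approved by admin"),
					// Assumed constant from the new EndpointServiceConnectionStatus enum.
					Status: to.Ptr(armmachinelearning.EndpointServiceConnectionStatusApproved),
				},
			},
		}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
}
```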
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. -// - privateEndpointConnectionName - The name of the private endpoint connection associated with the workspace +// - privateEndpointConnectionName - NRP Private Endpoint Connection Name // - options - PrivateEndpointConnectionsClientDeleteOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Delete // method. func (client *PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientDeleteOptions) (PrivateEndpointConnectionsClientDeleteResponse, error) { @@ -156,19 +157,19 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil } -// Get - Gets the specified private endpoint connection associated with the workspace. +// Get - Called by end-users to get a PE connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. -// - privateEndpointConnectionName - The name of the private endpoint connection associated with the workspace +// - privateEndpointConnectionName - NRP Private Endpoint Connection Name // - options - PrivateEndpointConnectionsClientGetOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Get // method. func (client *PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientGetOptions) (PrivateEndpointConnectionsClientGetResponse, error) { @@ -210,7 +211,7 @@ func (client *PrivateEndpointConnectionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,9 +226,9 @@ func (client *PrivateEndpointConnectionsClient) getHandleResponse(resp *http.Res return result, nil } -// NewListPager - List all the private endpoint connections associated with the workspace. +// NewListPager - Called by end-users to get all PE connections. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.NewListPager @@ -257,6 +258,10 @@ func (client *PrivateEndpointConnectionsClient) NewListPager(resourceGroupName s // listCreateRequest creates the List request. 
func (client *PrivateEndpointConnectionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *PrivateEndpointConnectionsClientListOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } @@ -265,16 +270,12 @@ func (client *PrivateEndpointConnectionsClient) listCreateRequest(ctx context.Co return nil, errors.New("parameter workspaceName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) - if client.subscriptionID == "" { - return nil, errors.New("parameter client.subscriptionID cannot be empty") - } - urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client_example_test.go deleted file mode 100644 index 78b5326d67ff..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/privateendpointconnections_client_example_test.go +++ /dev/null @@ -1,175 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
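Editor's note: the example file removed below exercised the 2022-10-01 listing API. A rough, hypothetical sketch of the equivalent read against the preview surface follows; the pager shape is unchanged, but the connection state's `Status` now carries the `EndpointServiceConnectionStatus` type, and the `/v4` import path plus the client-factory setup are assumptions.

```go
// Hypothetical sketch: list private endpoint connections and inspect their status.
// Assumes a *armmachinelearning.ClientFactory built as in the removed examples.
package sketch

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func listConnections(ctx context.Context, clientFactory *armmachinelearning.ClientFactory) {
	pager := clientFactory.NewPrivateEndpointConnectionsClient().NewListPager("rg-1234", "testworkspace", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, conn := range page.Value {
			// Status is now a *EndpointServiceConnectionStatus (assumed name);
			// nil checks are omitted for brevity.
			fmt.Println(*conn.Name, *conn.Properties.PrivateLinkServiceConnectionState.Status)
		}
	}
}
```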
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/PrivateEndpointConnection/list.json -func ExamplePrivateEndpointConnectionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewPrivateEndpointConnectionsClient().NewListPager("rg-1234", "testworkspace", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.PrivateEndpointConnectionListResult = armmachinelearning.PrivateEndpointConnectionListResult{ - // Value: []*armmachinelearning.PrivateEndpointConnection{ - // { - // Name: to.Ptr("{privateEndpointConnectionName}"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/privateEndpointConnections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/privateEndpointConnections/{privateEndpointConnectionName}"), - // Properties: &armmachinelearning.PrivateEndpointConnectionProperties{ - // PrivateEndpoint: &armmachinelearning.PrivateEndpoint{ - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.Network/privateEndpoints/petest01"), - // }, - // PrivateLinkServiceConnectionState: &armmachinelearning.PrivateLinkServiceConnectionState{ - // Description: to.Ptr("Auto-Approved"), - // ActionsRequired: to.Ptr("None"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.PrivateEndpointConnectionProvisioningStateSucceeded), - // }, - // }, - // { - // Name: to.Ptr("{privateEndpointConnectionName}"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/privateEndpointConnections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/privateEndpointConnections/{privateEndpointConnectionName}"), - // Properties: &armmachinelearning.PrivateEndpointConnectionProperties{ - // PrivateEndpoint: &armmachinelearning.PrivateEndpoint{ - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.Network/privateEndpoints/petest01"), - // }, - // PrivateLinkServiceConnectionState: &armmachinelearning.PrivateLinkServiceConnectionState{ - // Description: to.Ptr("Auto-Approved"), - // ActionsRequired: 
to.Ptr("None"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.PrivateEndpointConnectionProvisioningStateSucceeded), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/PrivateEndpointConnection/get.json -func ExamplePrivateEndpointConnectionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewPrivateEndpointConnectionsClient().Get(ctx, "rg-1234", "testworkspace", "{privateEndpointConnectionName}", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.PrivateEndpointConnection = armmachinelearning.PrivateEndpointConnection{ - // Name: to.Ptr("{privateEndpointConnectionName}"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/privateEndpointConnections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/privateEndpointConnections/{privateEndpointConnectionName}"), - // Properties: &armmachinelearning.PrivateEndpointConnectionProperties{ - // PrivateEndpoint: &armmachinelearning.PrivateEndpoint{ - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.Network/privateEndpoints/petest01"), - // }, - // PrivateLinkServiceConnectionState: &armmachinelearning.PrivateLinkServiceConnectionState{ - // Description: to.Ptr("Auto-Approved"), - // ActionsRequired: to.Ptr("None"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.PrivateEndpointConnectionProvisioningStateSucceeded), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/PrivateEndpointConnection/createOrUpdate.json -func ExamplePrivateEndpointConnectionsClient_CreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewPrivateEndpointConnectionsClient().CreateOrUpdate(ctx, "rg-1234", "testworkspace", "{privateEndpointConnectionName}", armmachinelearning.PrivateEndpointConnection{ - Properties: &armmachinelearning.PrivateEndpointConnectionProperties{ - PrivateLinkServiceConnectionState: 
&armmachinelearning.PrivateLinkServiceConnectionState{ - Description: to.Ptr("Auto-Approved"), - Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.PrivateEndpointConnection = armmachinelearning.PrivateEndpointConnection{ - // Name: to.Ptr("{privateEndpointConnectionName}"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/privateEndpointConnections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/privateEndpointConnections/{privateEndpointConnectionName}"), - // Properties: &armmachinelearning.PrivateEndpointConnectionProperties{ - // PrivateEndpoint: &armmachinelearning.PrivateEndpoint{ - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.Network/privateEndpoints/petest01"), - // }, - // PrivateLinkServiceConnectionState: &armmachinelearning.PrivateLinkServiceConnectionState{ - // Description: to.Ptr("Auto-Approved"), - // ActionsRequired: to.Ptr("None"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.PrivateEndpointConnectionProvisioningStateSucceeded), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/PrivateEndpointConnection/delete.json -func ExamplePrivateEndpointConnectionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewPrivateEndpointConnectionsClient().Delete(ctx, "rg-1234", "testworkspace", "{privateEndpointConnectionName}", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client.go index 1a7327c2d4b1..7564b7e23be9 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client.go @@ -44,27 +44,37 @@ func NewPrivateLinkResourcesClient(subscriptionID string, credential azcore.Toke return client, nil } -// List - Gets the private link resources that need to be created for a workspace. -// If the operation fails it returns an *azcore.ResponseError type. +// NewListPager - Called by Client (Portal, CLI, etc) to get available "private link resources" for the workspace. Each "private +// link resource" is a connection endpoint (IP address) to the resource. 
Pre single +// connection endpoint per workspace: the Data Plane IP address, returned by DNS resolution. Other RPs, such as Azure Storage, +// have multiple - one for Blobs, other for Queues, etc. Defined in the "[NRP] +// Private Endpoint Design" doc, topic "GET API for GroupIds". // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. -// - options - PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.List +// - options - PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.NewListPager // method. -func (client *PrivateLinkResourcesClient) List(ctx context.Context, resourceGroupName string, workspaceName string, options *PrivateLinkResourcesClientListOptions) (PrivateLinkResourcesClientListResponse, error) { - req, err := client.listCreateRequest(ctx, resourceGroupName, workspaceName, options) - if err != nil { - return PrivateLinkResourcesClientListResponse{}, err - } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PrivateLinkResourcesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateLinkResourcesClientListResponse{}, runtime.NewResponseError(resp) - } - return client.listHandleResponse(resp) +func (client *PrivateLinkResourcesClient) NewListPager(resourceGroupName string, workspaceName string, options *PrivateLinkResourcesClientListOptions) *runtime.Pager[PrivateLinkResourcesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[PrivateLinkResourcesClientListResponse]{ + More: func(page PrivateLinkResourcesClientListResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *PrivateLinkResourcesClientListResponse) (PrivateLinkResourcesClientListResponse, error) { + req, err := client.listCreateRequest(ctx, resourceGroupName, workspaceName, options) + if err != nil { + return PrivateLinkResourcesClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PrivateLinkResourcesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PrivateLinkResourcesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) } // listCreateRequest creates the List request. @@ -87,7 +97,7 @@ func (client *PrivateLinkResourcesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client_example_test.go deleted file mode 100644 index e344c27f9ec0..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/privatelinkresources_client_example_test.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. 
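Editor's note: since the synchronous List call is replaced by a pager whose More handler always reports false, consumers now read the (single) page through the standard pager loop. A minimal, hypothetical sketch, assuming a client factory built as in the removed examples, the `/v4` import path, and that the response keeps the prior `PrivateLinkResourceListResult` shape with a `Value` slice:

```go
// Hypothetical sketch: the old List call is gone; the new pager yields one page.
package sketch

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func listPrivateLinkResources(ctx context.Context, clientFactory *armmachinelearning.ClientFactory) {
	pager := clientFactory.NewPrivateLinkResourcesClient().NewListPager("rg-1234", "testworkspace", nil)
	for pager.More() { // true before the first fetch, then false: a single page
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, v := range page.Value { // Value assumed to mirror the prior result shape
			fmt.Println(*v.Name) // e.g. "amlworkspace"
		}
	}
}
```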
-// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/PrivateLinkResource/list.json -func ExamplePrivateLinkResourcesClient_List() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewPrivateLinkResourcesClient().List(ctx, "rg-1234", "testworkspace", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.PrivateLinkResourceListResult = armmachinelearning.PrivateLinkResourceListResult{ - // Value: []*armmachinelearning.PrivateLinkResource{ - // { - // Name: to.Ptr("amlworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/privateLinkResources"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/privateLinkResources/amlworkspace"), - // Properties: &armmachinelearning.PrivateLinkResourceProperties{ - // GroupID: to.Ptr("amlworkspace"), - // RequiredMembers: []*string{ - // to.Ptr("default")}, - // }, - // }}, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client.go index 55119e1469d5..0ac1ce1e1b8f 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client.go @@ -46,7 +46,7 @@ func NewQuotasClient(subscriptionID string, credential azcore.TokenCredential, o // NewListPager - Gets the currently assigned Workspace Quotas based on VMFamily. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - location - The location for which resource usage is queried. // - options - QuotasClientListOptions contains the optional parameters for the QuotasClient.NewListPager method. 
func (client *QuotasClient) NewListPager(location string, options *QuotasClientListOptions) *runtime.Pager[QuotasClientListResponse] { @@ -93,7 +93,7 @@ func (client *QuotasClient) listCreateRequest(ctx context.Context, location stri return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -111,7 +111,7 @@ func (client *QuotasClient) listHandleResponse(resp *http.Response) (QuotasClien // Update - Update quota for each VM family in workspace. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - location - The location for update quota is queried. // - parameters - Quota update parameters. // - options - QuotasClientUpdateOptions contains the optional parameters for the QuotasClient.Update method. @@ -146,7 +146,7 @@ func (client *QuotasClient) updateCreateRequest(ctx context.Context, location st return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, parameters) diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client_example_test.go deleted file mode 100644 index cad9ba904c35..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/quotas_client_example_test.go +++ /dev/null @@ -1,498 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
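Editor's note: the quota operations keep their request and response shapes; only the api-version moves to the preview. For orientation, a compact, hypothetical sketch of Update under the new version follows (resource IDs and limits are placeholders; the `/v4` import path and factory setup are assumptions):

```go
// Hypothetical sketch: raise a workspace VM-family quota via QuotasClient.Update.
package sketch

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func updateQuota(ctx context.Context, clientFactory *armmachinelearning.ClientFactory) {
	_, err := clientFactory.NewQuotasClient().Update(ctx, "eastus", armmachinelearning.QuotaUpdateParameters{
		Value: []*armmachinelearning.QuotaBaseProperties{{
			Type:  to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"),
			ID:    to.Ptr("/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.MachineLearningServices/workspaces/<workspace>/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"),
			Limit: to.Ptr[int64](100),
			Unit:  to.Ptr(armmachinelearning.QuotaUnitCount),
		}},
	}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
}
```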
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Quota/update.json -func ExampleQuotasClient_Update() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewQuotasClient().Update(ctx, "eastus", armmachinelearning.QuotaUpdateParameters{ - Value: []*armmachinelearning.QuotaBaseProperties{ - { - Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - Limit: to.Ptr[int64](100), - Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - }, - { - Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - Limit: to.Ptr[int64](200), - Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - }}, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.UpdateWorkspaceQuotasResult = armmachinelearning.UpdateWorkspaceQuotasResult{ - // Value: []*armmachinelearning.UpdateWorkspaceQuotas{ - // { - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](100), - // Status: to.Ptr(armmachinelearning.StatusSuccess), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](200), - // Status: to.Ptr(armmachinelearning.StatusSuccess), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }}, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Quota/list.json -func ExampleQuotasClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewQuotasClient().NewListPager("eastus", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.ListWorkspaceQuotas = armmachinelearning.ListWorkspaceQuotas{ - // Value: []*armmachinelearning.ResourceQuota{ - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_D_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](48), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_D_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](12), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_D_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](12), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_D_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quota"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: 
to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](12), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](12), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_Dv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_Dv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_Dv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_Dv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_FSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_FSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_FSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_FSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](12), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // 
Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_NCv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_NCv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_NCv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_NCv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_NCv3_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv3 Family Cluster 
Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_NCv3_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_NCv3_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_NCv3_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_ND_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_ND_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_ND_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_ND_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_NDv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_NDv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_NDv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_NDv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/quotas/Standard_NV_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/quotas/Standard_NV_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: 
to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/quotas/Standard_NV_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }, - // { - // Name: &armmachinelearning.ResourceName{ - // LocalizedValue: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/quotas"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace3/quotas/Standard_NV_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.QuotaUnitCount), - // }}, - // } - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registries_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registries_client.go new file mode 100644 index 000000000000..c20cda58422a --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registries_client.go @@ -0,0 +1,486 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RegistriesClient contains the methods for the Registries group. +// Don't use this type directly, use NewRegistriesClient() instead. +type RegistriesClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistriesClient creates a new instance of RegistriesClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistriesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistriesClient, error) { + cl, err := arm.NewClient(moduleName+".RegistriesClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistriesClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - body - Details required to create the registry. 
+// - options - RegistriesClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistriesClient.BeginCreateOrUpdate +// method. +func (client *RegistriesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, body Registry, options *RegistriesClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistriesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistriesClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistriesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistriesClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, body Registry, options *RegistriesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RegistriesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, body Registry, options *RegistriesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. 
This is case-insensitive +// - options - RegistriesClientBeginDeleteOptions contains the optional parameters for the RegistriesClient.BeginDelete method. +func (client *RegistriesClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, options *RegistriesClientBeginDeleteOptions) (*runtime.Poller[RegistriesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistriesClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistriesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistriesClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, options *RegistriesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistriesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistriesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - options - RegistriesClientGetOptions contains the optional parameters for the RegistriesClient.Get method. 
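The new `BeginCreateOrUpdate` and `BeginDelete` operations above follow the standard azcore long-running-operation pattern: start the operation, then poll it to completion. The sketch below is not part of the generated diff; it is a minimal, hypothetical caller that assumes the `/v4` module path for the 4.0.0-beta.1 release and uses placeholder subscription, resource group, and registry names. A real request would also populate the registry's `Properties`.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	// NewRegistriesClient is the constructor added in this file.
	client, err := armmachinelearning.NewRegistriesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// BeginCreateOrUpdate returns a poller; PollUntilDone blocks until the service
	// reports a terminal state for the create/update operation.
	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<registry-name>", armmachinelearning.Registry{
		Location: to.Ptr("eastus"), // minimal body for illustration only
	}, nil)
	if err != nil {
		log.Fatalf("failed to start the request: %v", err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}
	_ = res // final response of the create/update operation
}
```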
+func (client *RegistriesClient) Get(ctx context.Context, resourceGroupName string, registryName string, options *RegistriesClientGetOptions) (RegistriesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, options) + if err != nil { + return RegistriesClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistriesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistriesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistriesClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistriesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistriesClient) getHandleResponse(resp *http.Response) (RegistriesClientGetResponse, error) { + result := RegistriesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Registry); err != nil { + return RegistriesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List registries +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - options - RegistriesClientListOptions contains the optional parameters for the RegistriesClient.NewListPager method. 
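`Get` is a plain synchronous call whose response embeds the `Registry` model (see `getHandleResponse` above). A hypothetical sketch under the same assumptions as the previous one (placeholder names, assumed `/v4` import path):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	client, err := armmachinelearning.NewRegistriesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	res, err := client.Get(context.Background(), "<resource-group>", "<registry-name>", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	_ = res.Registry // the retrieved registry resource
}
```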
+func (client *RegistriesClient) NewListPager(resourceGroupName string, options *RegistriesClientListOptions) *runtime.Pager[RegistriesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistriesClientListResponse]{ + More: func(page RegistriesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistriesClientListResponse) (RegistriesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistriesClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistriesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistriesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistriesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *RegistriesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *RegistriesClient) listHandleResponse(resp *http.Response) (RegistriesClientListResponse, error) { + result := RegistriesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RegistryTrackedResourceArmPaginatedResult); err != nil { + return RegistriesClientListResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - List registries by subscription +// +// Generated from API version 2023-06-01-preview +// - options - RegistriesClientListBySubscriptionOptions contains the optional parameters for the RegistriesClient.NewListBySubscriptionPager +// method. 
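`NewListPager` returns a `runtime.Pager` that follows `nextLink` until it is empty, the same pattern used by the quota listing example removed above. A hypothetical sketch, again with placeholder names and the assumed `/v4` import path:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewRegistriesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	pager := client.NewListPager("<resource-group>", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, registry := range page.Value { // each entry is a *armmachinelearning.Registry
			_ = registry
		}
	}
}
```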
+func (client *RegistriesClient) NewListBySubscriptionPager(options *RegistriesClientListBySubscriptionOptions) *runtime.Pager[RegistriesClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistriesClientListBySubscriptionResponse]{ + More: func(page RegistriesClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistriesClientListBySubscriptionResponse) (RegistriesClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistriesClientListBySubscriptionResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistriesClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistriesClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. +func (client *RegistriesClient) listBySubscriptionCreateRequest(ctx context.Context, options *RegistriesClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/registries" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *RegistriesClient) listBySubscriptionHandleResponse(resp *http.Response) (RegistriesClientListBySubscriptionResponse, error) { + result := RegistriesClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RegistryTrackedResourceArmPaginatedResult); err != nil { + return RegistriesClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// BeginRemoveRegions - Remove regions from registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - body - Details required to create the registry. +// - options - RegistriesClientBeginRemoveRegionsOptions contains the optional parameters for the RegistriesClient.BeginRemoveRegions +// method. 
+func (client *RegistriesClient) BeginRemoveRegions(ctx context.Context, resourceGroupName string, registryName string, body Registry, options *RegistriesClientBeginRemoveRegionsOptions) (*runtime.Poller[RegistriesClientRemoveRegionsResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.removeRegions(ctx, resourceGroupName, registryName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistriesClientRemoveRegionsResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistriesClientRemoveRegionsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// RemoveRegions - Remove regions from registry +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistriesClient) removeRegions(ctx context.Context, resourceGroupName string, registryName string, body Registry, options *RegistriesClientBeginRemoveRegionsOptions) (*http.Response, error) { + req, err := client.removeRegionsCreateRequest(ctx, resourceGroupName, registryName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// removeRegionsCreateRequest creates the RemoveRegions request. +func (client *RegistriesClient) removeRegionsCreateRequest(ctx context.Context, resourceGroupName string, registryName string, body Registry, options *RegistriesClientBeginRemoveRegionsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/removeRegions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// Update - Update tags +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - body - Details required to create the registry. +// - options - RegistriesClientUpdateOptions contains the optional parameters for the RegistriesClient.Update method. 
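`Update` is a synchronous PATCH that takes a `PartialRegistryPartialTrackedResource`. The sketch below is hypothetical: it assumes the partial resource exposes a `Tags` map, which is the usual shape of an ARM partial tracked resource, and uses placeholder names and the assumed `/v4` import path.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	client, err := armmachinelearning.NewRegistriesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Only the fields set on the partial resource are patched; here only tags (assumed field).
	res, err := client.Update(context.Background(), "<resource-group>", "<registry-name>",
		armmachinelearning.PartialRegistryPartialTrackedResource{
			Tags: map[string]*string{"env": to.Ptr("test")},
		}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	_ = res
}
```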
+func (client *RegistriesClient) Update(ctx context.Context, resourceGroupName string, registryName string, body PartialRegistryPartialTrackedResource, options *RegistriesClientUpdateOptions) (RegistriesClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, registryName, body, options) + if err != nil { + return RegistriesClientUpdateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistriesClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistriesClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *RegistriesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, body PartialRegistryPartialTrackedResource, options *RegistriesClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// updateHandleResponse handles the Update response. +func (client *RegistriesClient) updateHandleResponse(resp *http.Response) (RegistriesClientUpdateResponse, error) { + result := RegistriesClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Registry); err != nil { + return RegistriesClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrycodecontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycodecontainers_client.go new file mode 100644 index 000000000000..cd7048cee30c --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycodecontainers_client.go @@ -0,0 +1,326 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RegistryCodeContainersClient contains the methods for the RegistryCodeContainers group. +// Don't use this type directly, use NewRegistryCodeContainersClient() instead. +type RegistryCodeContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryCodeContainersClient creates a new instance of RegistryCodeContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryCodeContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryCodeContainersClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryCodeContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryCodeContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update Code container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. +// - body - Container entity to create or update. +// - options - RegistryCodeContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryCodeContainersClient.BeginCreateOrUpdate +// method. +func (client *RegistryCodeContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, codeName string, body CodeContainer, options *RegistryCodeContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryCodeContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, codeName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryCodeContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryCodeContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update Code container. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *RegistryCodeContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, codeName string, body CodeContainer, options *RegistryCodeContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, codeName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RegistryCodeContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, body CodeContainer, options *RegistryCodeContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete Code container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. +// - options - RegistryCodeContainersClientBeginDeleteOptions contains the optional parameters for the RegistryCodeContainersClient.BeginDelete +// method. 
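The registry code container client mirrors the workspace-scoped code container client: `BeginCreateOrUpdate` is a long-running PUT on `.../registries/{registryName}/codes/{codeName}`. The sketch below is hypothetical; the `CodeContainerProperties` field shown is an assumption based on the existing workspace code container model, and all names are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewRegistryCodeContainersClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<registry-name>", "<code-name>",
		armmachinelearning.CodeContainer{
			Properties: &armmachinelearning.CodeContainerProperties{
				Description: to.Ptr("sample code container"), // assumed field, for illustration
			},
		}, nil)
	if err != nil {
		log.Fatalf("failed to start the request: %v", err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}
	_ = res
}
```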
+func (client *RegistryCodeContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, codeName string, options *RegistryCodeContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryCodeContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, codeName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryCodeContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryCodeContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete Code container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryCodeContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, codeName string, options *RegistryCodeContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, codeName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryCodeContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, options *RegistryCodeContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get Code container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. 
+// - options - RegistryCodeContainersClientGetOptions contains the optional parameters for the RegistryCodeContainersClient.Get +// method. +func (client *RegistryCodeContainersClient) Get(ctx context.Context, resourceGroupName string, registryName string, codeName string, options *RegistryCodeContainersClientGetOptions) (RegistryCodeContainersClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, codeName, options) + if err != nil { + return RegistryCodeContainersClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryCodeContainersClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryCodeContainersClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryCodeContainersClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, options *RegistryCodeContainersClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryCodeContainersClient) getHandleResponse(resp *http.Response) (RegistryCodeContainersClientGetResponse, error) { + result := RegistryCodeContainersClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CodeContainer); err != nil { + return RegistryCodeContainersClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - options - RegistryCodeContainersClientListOptions contains the optional parameters for the RegistryCodeContainersClient.NewListPager +// method. 
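`NewListPager` for registry code containers pages through `.../registries/{registryName}/codes`; its options type also carries an optional `$skip` continuation token (see `listCreateRequest` below). A hypothetical sketch with placeholder names and the assumed `/v4` import path:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	client, err := armmachinelearning.NewRegistryCodeContainersClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Pass a non-nil options value with Skip set to resume listing from a $skip token.
	pager := client.NewListPager("<resource-group>", "<registry-name>", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, container := range page.Value { // each entry is a *armmachinelearning.CodeContainer
			_ = container
		}
	}
}
```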
+func (client *RegistryCodeContainersClient) NewListPager(resourceGroupName string, registryName string, options *RegistryCodeContainersClientListOptions) *runtime.Pager[RegistryCodeContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryCodeContainersClientListResponse]{ + More: func(page RegistryCodeContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryCodeContainersClientListResponse) (RegistryCodeContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryCodeContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryCodeContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryCodeContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryCodeContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistryCodeContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *RegistryCodeContainersClient) listHandleResponse(resp *http.Response) (RegistryCodeContainersClientListResponse, error) { + result := RegistryCodeContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CodeContainerResourceArmPaginatedResult); err != nil { + return RegistryCodeContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrycodeversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycodeversions_client.go new file mode 100644 index 000000000000..a4fef60ecfeb --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycodeversions_client.go @@ -0,0 +1,422 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// RegistryCodeVersionsClient contains the methods for the RegistryCodeVersions group. +// Don't use this type directly, use NewRegistryCodeVersionsClient() instead. +type RegistryCodeVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryCodeVersionsClient creates a new instance of RegistryCodeVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryCodeVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryCodeVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryCodeVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryCodeVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrGetStartPendingUpload - Generate a storage location and credential for the client to upload a code asset to. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Pending upload name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Pending upload request object +// - options - RegistryCodeVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the RegistryCodeVersionsClient.CreateOrGetStartPendingUpload +// method. +func (client *RegistryCodeVersionsClient) CreateOrGetStartPendingUpload(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, body PendingUploadRequestDto, options *RegistryCodeVersionsClientCreateOrGetStartPendingUploadOptions) (RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse, error) { + req, err := client.createOrGetStartPendingUploadCreateRequest(ctx, resourceGroupName, registryName, codeName, version, body, options) + if err != nil { + return RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse{}, runtime.NewResponseError(resp) + } + return client.createOrGetStartPendingUploadHandleResponse(resp) +} + +// createOrGetStartPendingUploadCreateRequest creates the CreateOrGetStartPendingUpload request. 
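
> Editor's sketch of calling `CreateOrGetStartPendingUpload` shown above: a single POST that hands back a storage location and credential to upload the code asset against. Everything here is illustrative, not from the generated file; the scope names are placeholders and a real caller would populate the `PendingUploadRequestDto` body rather than passing it empty.

```go
package examples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// startCodeUpload requests a pending-upload location for one code version.
// Resource names are placeholders; the empty request body is for illustration only.
func startCodeUpload(ctx context.Context, client *armmachinelearning.RegistryCodeVersionsClient) (armmachinelearning.RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse, error) {
	return client.CreateOrGetStartPendingUpload(ctx,
		"my-rg", "my-registry", "my-code", "1",
		armmachinelearning.PendingUploadRequestDto{}, // fill in upload details for a real request
		nil)
}
```
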
+func (client *RegistryCodeVersionsClient) createOrGetStartPendingUploadCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, body PendingUploadRequestDto, options *RegistryCodeVersionsClientCreateOrGetStartPendingUploadOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}/startPendingUpload" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// createOrGetStartPendingUploadHandleResponse handles the CreateOrGetStartPendingUpload response. +func (client *RegistryCodeVersionsClient) createOrGetStartPendingUploadHandleResponse(resp *http.Response) (RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse, error) { + result := RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PendingUploadResponseDto); err != nil { + return RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + return result, nil +} + +// BeginCreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. +// - version - Version identifier. +// - body - Version entity to create or update. +// - options - RegistryCodeVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryCodeVersionsClient.BeginCreateOrUpdate +// method. 
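
> Editor's sketch of the long-running `BeginCreateOrUpdate` flow documented above: the PUT returns a poller, and `PollUntilDone` blocks until the operation reaches a terminal state. Illustrative only; the names and the empty `CodeVersion` body are placeholders, not values taken from this diff.

```go
package examples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// createCodeVersion starts the create-or-update and waits for its terminal state.
// All names and the empty CodeVersion body are placeholders.
func createCodeVersion(ctx context.Context, client *armmachinelearning.RegistryCodeVersionsClient) (*armmachinelearning.CodeVersion, error) {
	poller, err := client.BeginCreateOrUpdate(ctx,
		"my-rg", "my-registry", "my-code", "1",
		armmachinelearning.CodeVersion{}, // a real call would populate Properties
		nil)
	if err != nil {
		return nil, err
	}
	// Block until the service reports success or failure; pass PollUntilDoneOptions
	// instead of nil to tune the polling frequency.
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &resp.CodeVersion, nil
}
```
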
+func (client *RegistryCodeVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, body CodeVersion, options *RegistryCodeVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryCodeVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, codeName, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryCodeVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryCodeVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryCodeVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, body CodeVersion, options *RegistryCodeVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, codeName, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *RegistryCodeVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, body CodeVersion, options *RegistryCodeVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. +// - version - Version identifier. +// - options - RegistryCodeVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryCodeVersionsClient.BeginDelete +// method. +func (client *RegistryCodeVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, options *RegistryCodeVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryCodeVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, codeName, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryCodeVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryCodeVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *RegistryCodeVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, options *RegistryCodeVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, codeName, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryCodeVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, options *RegistryCodeVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. +// - version - Version identifier. +// - options - RegistryCodeVersionsClientGetOptions contains the optional parameters for the RegistryCodeVersionsClient.Get +// method. 
+func (client *RegistryCodeVersionsClient) Get(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, options *RegistryCodeVersionsClientGetOptions) (RegistryCodeVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, codeName, version, options) + if err != nil { + return RegistryCodeVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryCodeVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryCodeVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryCodeVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, version string, options *RegistryCodeVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryCodeVersionsClient) getHandleResponse(resp *http.Response) (RegistryCodeVersionsClientGetResponse, error) { + result := RegistryCodeVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CodeVersion); err != nil { + return RegistryCodeVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List versions. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - codeName - Container name. +// - options - RegistryCodeVersionsClientListOptions contains the optional parameters for the RegistryCodeVersionsClient.NewListPager +// method. 
+func (client *RegistryCodeVersionsClient) NewListPager(resourceGroupName string, registryName string, codeName string, options *RegistryCodeVersionsClientListOptions) *runtime.Pager[RegistryCodeVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryCodeVersionsClientListResponse]{ + More: func(page RegistryCodeVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryCodeVersionsClientListResponse) (RegistryCodeVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, codeName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryCodeVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryCodeVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryCodeVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryCodeVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, codeName string, options *RegistryCodeVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if codeName == "" { + return nil, errors.New("parameter codeName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{codeName}", url.PathEscape(codeName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.OrderBy != nil { + reqQP.Set("$orderBy", *options.OrderBy) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryCodeVersionsClient) listHandleResponse(resp *http.Response) (RegistryCodeVersionsClientListResponse, error) { + result := RegistryCodeVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CodeVersionResourceArmPaginatedResult); err != nil { + return RegistryCodeVersionsClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrycomponentcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycomponentcontainers_client.go new file mode 100644 index 000000000000..1118ecc768a5 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycomponentcontainers_client.go @@ -0,0 +1,326 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RegistryComponentContainersClient contains the methods for the RegistryComponentContainers group. +// Don't use this type directly, use NewRegistryComponentContainersClient() instead. +type RegistryComponentContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryComponentContainersClient creates a new instance of RegistryComponentContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryComponentContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryComponentContainersClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryComponentContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryComponentContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - body - Container entity to create or update. +// - options - RegistryComponentContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryComponentContainersClient.BeginCreateOrUpdate +// method. 
+func (client *RegistryComponentContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, componentName string, body ComponentContainer, options *RegistryComponentContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryComponentContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, componentName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryComponentContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryComponentContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryComponentContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, componentName string, body ComponentContainer, options *RegistryComponentContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, componentName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RegistryComponentContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, body ComponentContainer, options *RegistryComponentContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete container. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - options - RegistryComponentContainersClientBeginDeleteOptions contains the optional parameters for the RegistryComponentContainersClient.BeginDelete +// method. +func (client *RegistryComponentContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, componentName string, options *RegistryComponentContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryComponentContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, componentName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryComponentContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryComponentContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryComponentContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, componentName string, options *RegistryComponentContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, componentName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *RegistryComponentContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, options *RegistryComponentContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - options - RegistryComponentContainersClientGetOptions contains the optional parameters for the RegistryComponentContainersClient.Get +// method. +func (client *RegistryComponentContainersClient) Get(ctx context.Context, resourceGroupName string, registryName string, componentName string, options *RegistryComponentContainersClientGetOptions) (RegistryComponentContainersClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, componentName, options) + if err != nil { + return RegistryComponentContainersClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryComponentContainersClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryComponentContainersClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *RegistryComponentContainersClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, options *RegistryComponentContainersClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryComponentContainersClient) getHandleResponse(resp *http.Response) (RegistryComponentContainersClientGetResponse, error) { + result := RegistryComponentContainersClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ComponentContainer); err != nil { + return RegistryComponentContainersClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - options - RegistryComponentContainersClientListOptions contains the optional parameters for the RegistryComponentContainersClient.NewListPager +// method. 
+func (client *RegistryComponentContainersClient) NewListPager(resourceGroupName string, registryName string, options *RegistryComponentContainersClientListOptions) *runtime.Pager[RegistryComponentContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryComponentContainersClientListResponse]{ + More: func(page RegistryComponentContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryComponentContainersClientListResponse) (RegistryComponentContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryComponentContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryComponentContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryComponentContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryComponentContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistryComponentContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryComponentContainersClient) listHandleResponse(resp *http.Response) (RegistryComponentContainersClientListResponse, error) { + result := RegistryComponentContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ComponentContainerResourceArmPaginatedResult); err != nil { + return RegistryComponentContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrycomponentversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycomponentversions_client.go new file mode 100644 index 000000000000..041d51847882 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrycomponentversions_client.go @@ -0,0 +1,356 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// RegistryComponentVersionsClient contains the methods for the RegistryComponentVersions group. +// Don't use this type directly, use NewRegistryComponentVersionsClient() instead. +type RegistryComponentVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryComponentVersionsClient creates a new instance of RegistryComponentVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryComponentVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryComponentVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryComponentVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryComponentVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - version - Version identifier. +// - body - Version entity to create or update. +// - options - RegistryComponentVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryComponentVersionsClient.BeginCreateOrUpdate +// method. 
+func (client *RegistryComponentVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, body ComponentVersion, options *RegistryComponentVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryComponentVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, componentName, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryComponentVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryComponentVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryComponentVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, body ComponentVersion, options *RegistryComponentVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, componentName, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *RegistryComponentVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, body ComponentVersion, options *RegistryComponentVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - version - Version identifier. +// - options - RegistryComponentVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryComponentVersionsClient.BeginDelete +// method. +func (client *RegistryComponentVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, options *RegistryComponentVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryComponentVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, componentName, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryComponentVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryComponentVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *RegistryComponentVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, options *RegistryComponentVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, componentName, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryComponentVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, options *RegistryComponentVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - version - Version identifier. +// - options - RegistryComponentVersionsClientGetOptions contains the optional parameters for the RegistryComponentVersionsClient.Get +// method. 
+func (client *RegistryComponentVersionsClient) Get(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, options *RegistryComponentVersionsClientGetOptions) (RegistryComponentVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, componentName, version, options) + if err != nil { + return RegistryComponentVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryComponentVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryComponentVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryComponentVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, version string, options *RegistryComponentVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryComponentVersionsClient) getHandleResponse(resp *http.Response) (RegistryComponentVersionsClientGetResponse, error) { + result := RegistryComponentVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ComponentVersion); err != nil { + return RegistryComponentVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List versions. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - componentName - Container name. +// - options - RegistryComponentVersionsClientListOptions contains the optional parameters for the RegistryComponentVersionsClient.NewListPager +// method. 
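
> Editor's sketch of paging component versions with the list options this client exposes; `OrderBy` and `Top` feed the `$orderBy`/`$top` query parameters wired up in `listCreateRequest` below. Illustrative only: the scope names and the ordering expression are placeholders.

```go
package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

// listComponentVersions pages through versions of one component, ordered and capped
// via the list options. Names and the ordering expression are placeholders.
func listComponentVersions(ctx context.Context, client *armmachinelearning.RegistryComponentVersionsClient) error {
	opts := &armmachinelearning.RegistryComponentVersionsClientListOptions{
		OrderBy: to.Ptr("createdtime desc"),
		Top:     to.Ptr[int32](10),
	}
	pager := client.NewListPager("my-rg", "my-registry", "my-component", opts)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, v := range page.Value {
			if v.Name != nil {
				fmt.Println(*v.Name)
			}
		}
	}
	return nil
}
```
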
+func (client *RegistryComponentVersionsClient) NewListPager(resourceGroupName string, registryName string, componentName string, options *RegistryComponentVersionsClientListOptions) *runtime.Pager[RegistryComponentVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryComponentVersionsClientListResponse]{ + More: func(page RegistryComponentVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryComponentVersionsClientListResponse) (RegistryComponentVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, componentName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryComponentVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryComponentVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryComponentVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryComponentVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, componentName string, options *RegistryComponentVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if componentName == "" { + return nil, errors.New("parameter componentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{componentName}", url.PathEscape(componentName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.OrderBy != nil { + reqQP.Set("$orderBy", *options.OrderBy) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryComponentVersionsClient) listHandleResponse(resp *http.Response) (RegistryComponentVersionsClientListResponse, error) { + result := RegistryComponentVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ComponentVersionResourceArmPaginatedResult); err != nil { + return RegistryComponentVersionsClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrydatacontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrydatacontainers_client.go new file mode 100644 index 000000000000..9fc4bbf6ef78 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrydatacontainers_client.go @@ -0,0 +1,329 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RegistryDataContainersClient contains the methods for the RegistryDataContainers group. +// Don't use this type directly, use NewRegistryDataContainersClient() instead. +type RegistryDataContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryDataContainersClient creates a new instance of RegistryDataContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryDataContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryDataContainersClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryDataContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryDataContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Container name. +// - body - Container entity to create or update. +// - options - RegistryDataContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryDataContainersClient.BeginCreateOrUpdate +// method. 
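Before the BeginCreateOrUpdate implementation below, a hedged usage sketch: the method returns a poller that can be driven to completion with PollUntilDone. This is a fragment, assuming a previously constructed RegistryDataContainersClient; the DataContainerProperties fields used here (DataType, Description) are assumptions about the package's models, and all names are placeholders.

```go
// createDataContainer sketches the long-running create-or-update pattern; names are placeholders.
func createDataContainer(ctx context.Context, client *armmachinelearning.RegistryDataContainersClient) {
	body := armmachinelearning.DataContainer{
		Properties: &armmachinelearning.DataContainerProperties{
			// DataType and Description are assumed model fields; adjust to the actual models in this module.
			DataType:    to.Ptr(armmachinelearning.DataTypeURIFolder),
			Description: to.Ptr("example data container"),
		},
	}
	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<registry-name>", "<data-name>", body, nil)
	if err != nil {
		log.Fatalf("BeginCreateOrUpdate failed: %v", err)
	}
	// PollUntilDone blocks until the operation reaches a terminal state.
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("polling failed: %v", err)
	}
	log.Println(*resp.ID)
}
```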
+func (client *RegistryDataContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, name string, body DataContainer, options *RegistryDataContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryDataContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, name, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryDataContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryDataContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryDataContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, name string, body DataContainer, options *RegistryDataContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, name, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RegistryDataContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, body DataContainer, options *RegistryDataContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Container name. +// - options - RegistryDataContainersClientBeginDeleteOptions contains the optional parameters for the RegistryDataContainersClient.BeginDelete +// method. +func (client *RegistryDataContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, name string, options *RegistryDataContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryDataContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryDataContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryDataContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryDataContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, name string, options *RegistryDataContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryDataContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, options *RegistryDataContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get container. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Container name. +// - options - RegistryDataContainersClientGetOptions contains the optional parameters for the RegistryDataContainersClient.Get +// method. +func (client *RegistryDataContainersClient) Get(ctx context.Context, resourceGroupName string, registryName string, name string, options *RegistryDataContainersClientGetOptions) (RegistryDataContainersClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, name, options) + if err != nil { + return RegistryDataContainersClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryDataContainersClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryDataContainersClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryDataContainersClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, options *RegistryDataContainersClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryDataContainersClient) getHandleResponse(resp *http.Response) (RegistryDataContainersClientGetResponse, error) { + result := RegistryDataContainersClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DataContainer); err != nil { + return RegistryDataContainersClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List Data containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - options - RegistryDataContainersClientListOptions contains the optional parameters for the RegistryDataContainersClient.NewListPager +// method. 
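The pager returned by NewListPager (implemented below) follows the standard azcore pattern: More reports whether another page exists and NextPage fetches it, following NextLink until it is exhausted. A minimal fragment, assuming a previously constructed client and placeholder names:

```go
pager := client.NewListPager("<resource-group>", "<registry-name>", nil)
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatalf("failed to advance page: %v", err)
	}
	// Value holds the DataContainer resources on the current page.
	for _, dc := range page.Value {
		log.Println(*dc.Name)
	}
}
```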
+func (client *RegistryDataContainersClient) NewListPager(resourceGroupName string, registryName string, options *RegistryDataContainersClientListOptions) *runtime.Pager[RegistryDataContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryDataContainersClientListResponse]{ + More: func(page RegistryDataContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryDataContainersClientListResponse) (RegistryDataContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryDataContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryDataContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryDataContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryDataContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistryDataContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryDataContainersClient) listHandleResponse(resp *http.Response) (RegistryDataContainersClientListResponse, error) { + result := RegistryDataContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DataContainerResourceArmPaginatedResult); err != nil { + return RegistryDataContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrydataversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrydataversions_client.go new file mode 100644 index 000000000000..dbe62feadd0a --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrydataversions_client.go @@ -0,0 +1,428 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// RegistryDataVersionsClient contains the methods for the RegistryDataVersions group. +// Don't use this type directly, use NewRegistryDataVersionsClient() instead. +type RegistryDataVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryDataVersionsClient creates a new instance of RegistryDataVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryDataVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryDataVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryDataVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryDataVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrGetStartPendingUpload - Generate a storage location and credential for the client to upload a data asset to. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Data asset name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Pending upload request object +// - options - RegistryDataVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the RegistryDataVersionsClient.CreateOrGetStartPendingUpload +// method. 
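CreateOrGetStartPendingUpload (implemented below) returns a storage location and credential that the caller can use to upload a data asset. A hedged fragment, assuming a previously constructed RegistryDataVersionsClient; the PendingUploadType field and value are assumptions about the request model, and the response fields are not inspected here:

```go
body := armmachinelearning.PendingUploadRequestDto{
	// PendingUploadType is assumed to be part of the request model; omit or adjust if the model differs.
	PendingUploadType: to.Ptr(armmachinelearning.PendingUploadTypeTemporaryBlobReference),
}
resp, err := client.CreateOrGetStartPendingUpload(ctx, "<resource-group>", "<registry-name>", "<data-name>", "1", body, nil)
if err != nil {
	log.Fatalf("CreateOrGetStartPendingUpload failed: %v", err)
}
// resp embeds PendingUploadResponseDto, which carries the blob reference and credential to upload against.
_ = resp
```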
+func (client *RegistryDataVersionsClient) CreateOrGetStartPendingUpload(ctx context.Context, resourceGroupName string, registryName string, name string, version string, body PendingUploadRequestDto, options *RegistryDataVersionsClientCreateOrGetStartPendingUploadOptions) (RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse, error) { + req, err := client.createOrGetStartPendingUploadCreateRequest(ctx, resourceGroupName, registryName, name, version, body, options) + if err != nil { + return RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse{}, runtime.NewResponseError(resp) + } + return client.createOrGetStartPendingUploadHandleResponse(resp) +} + +// createOrGetStartPendingUploadCreateRequest creates the CreateOrGetStartPendingUpload request. +func (client *RegistryDataVersionsClient) createOrGetStartPendingUploadCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, version string, body PendingUploadRequestDto, options *RegistryDataVersionsClientCreateOrGetStartPendingUploadOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}/startPendingUpload" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// createOrGetStartPendingUploadHandleResponse handles the CreateOrGetStartPendingUpload response. +func (client *RegistryDataVersionsClient) createOrGetStartPendingUploadHandleResponse(resp *http.Response) (RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse, error) { + result := RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PendingUploadResponseDto); err != nil { + return RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + return result, nil +} + +// BeginCreateOrUpdate - Create or update version. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Container name. +// - version - Version identifier. +// - body - Version entity to create or update. +// - options - RegistryDataVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryDataVersionsClient.BeginCreateOrUpdate +// method. +func (client *RegistryDataVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, name string, version string, body DataVersionBase, options *RegistryDataVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryDataVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, name, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryDataVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryDataVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryDataVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, name string, version string, body DataVersionBase, options *RegistryDataVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, name, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *RegistryDataVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, version string, body DataVersionBase, options *RegistryDataVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Container name. +// - version - Version identifier. +// - options - RegistryDataVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryDataVersionsClient.BeginDelete +// method. +func (client *RegistryDataVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, name string, version string, options *RegistryDataVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryDataVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, name, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryDataVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryDataVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *RegistryDataVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, name string, version string, options *RegistryDataVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, name, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryDataVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, version string, options *RegistryDataVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Container name. +// - version - Version identifier. +// - options - RegistryDataVersionsClientGetOptions contains the optional parameters for the RegistryDataVersionsClient.Get +// method. 
+func (client *RegistryDataVersionsClient) Get(ctx context.Context, resourceGroupName string, registryName string, name string, version string, options *RegistryDataVersionsClientGetOptions) (RegistryDataVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, name, version, options) + if err != nil { + return RegistryDataVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryDataVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryDataVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryDataVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, version string, options *RegistryDataVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryDataVersionsClient) getHandleResponse(resp *http.Response) (RegistryDataVersionsClientGetResponse, error) { + result := RegistryDataVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DataVersionBase); err != nil { + return RegistryDataVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List data versions in the data container +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - name - Data container's name +// - options - RegistryDataVersionsClientListOptions contains the optional parameters for the RegistryDataVersionsClient.NewListPager +// method. 
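The list operation supports server-side paging plus the $orderBy, $top, $skip, $tags, and listViewType query parameters, which listCreateRequest below maps from the options struct. A fragment showing how those options might be supplied, assuming a previously constructed client; the order-by expression and other values are placeholders:

```go
opts := &armmachinelearning.RegistryDataVersionsClientListOptions{
	OrderBy:      to.Ptr("createdtime desc"), // hypothetical order-by expression
	Top:          to.Ptr[int32](10),
	ListViewType: to.Ptr(armmachinelearning.ListViewTypeActiveOnly),
}
pager := client.NewListPager("<resource-group>", "<registry-name>", "<data-name>", opts)
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatalf("failed to advance page: %v", err)
	}
	for _, v := range page.Value {
		log.Println(*v.Name)
	}
}
```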
+func (client *RegistryDataVersionsClient) NewListPager(resourceGroupName string, registryName string, name string, options *RegistryDataVersionsClientListOptions) *runtime.Pager[RegistryDataVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryDataVersionsClientListResponse]{ + More: func(page RegistryDataVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryDataVersionsClientListResponse) (RegistryDataVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryDataVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryDataVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryDataVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryDataVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, name string, options *RegistryDataVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.OrderBy != nil { + reqQP.Set("$orderBy", *options.OrderBy) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.Tags != nil { + reqQP.Set("$tags", *options.Tags) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryDataVersionsClient) listHandleResponse(resp *http.Response) (RegistryDataVersionsClientListResponse, error) { + result := RegistryDataVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DataVersionBaseResourceArmPaginatedResult); err != nil { + return RegistryDataVersionsClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registryenvironmentcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registryenvironmentcontainers_client.go new file mode 100644 index 000000000000..a102aaa84d89 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registryenvironmentcontainers_client.go @@ -0,0 +1,329 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RegistryEnvironmentContainersClient contains the methods for the RegistryEnvironmentContainers group. +// Don't use this type directly, use NewRegistryEnvironmentContainersClient() instead. +type RegistryEnvironmentContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryEnvironmentContainersClient creates a new instance of RegistryEnvironmentContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryEnvironmentContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryEnvironmentContainersClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryEnvironmentContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryEnvironmentContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. +// - body - Container entity to create or update. +// - options - RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryEnvironmentContainersClient.BeginCreateOrUpdate +// method. 
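As the implementation below shows, BeginCreateOrUpdate honors a ResumeToken option so a long-running operation can be rehydrated later, typically from another process. A hedged fragment, assuming a previously constructed RegistryEnvironmentContainersClient; the EnvironmentContainerProperties.Description field is an assumption about the model, and the token is reused in-process here only for brevity:

```go
body := armmachinelearning.EnvironmentContainer{
	Properties: &armmachinelearning.EnvironmentContainerProperties{
		Description: to.Ptr("example environment container"), // assumed model field
	},
}
poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<registry-name>", "<environment-name>", body, nil)
if err != nil {
	log.Fatalf("BeginCreateOrUpdate failed: %v", err)
}
// Capture a resume token before the operation completes; it can be persisted and used later.
token, err := poller.ResumeToken()
if err != nil {
	log.Fatalf("ResumeToken failed: %v", err)
}
resumed, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<registry-name>", "<environment-name>", body,
	&armmachinelearning.RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions{ResumeToken: token})
if err != nil {
	log.Fatalf("resume failed: %v", err)
}
if _, err := resumed.PollUntilDone(ctx, nil); err != nil {
	log.Fatalf("polling failed: %v", err)
}
```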
+func (client *RegistryEnvironmentContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, environmentName string, body EnvironmentContainer, options *RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryEnvironmentContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, environmentName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryEnvironmentContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryEnvironmentContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryEnvironmentContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, environmentName string, body EnvironmentContainer, options *RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, environmentName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RegistryEnvironmentContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, body EnvironmentContainer, options *RegistryEnvironmentContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete container. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. +// - options - RegistryEnvironmentContainersClientBeginDeleteOptions contains the optional parameters for the RegistryEnvironmentContainersClient.BeginDelete +// method. +func (client *RegistryEnvironmentContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryEnvironmentContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, environmentName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryEnvironmentContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryEnvironmentContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryEnvironmentContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, environmentName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *RegistryEnvironmentContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. This is case-sensitive. +// - options - RegistryEnvironmentContainersClientGetOptions contains the optional parameters for the RegistryEnvironmentContainersClient.Get +// method. +func (client *RegistryEnvironmentContainersClient) Get(ctx context.Context, resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentContainersClientGetOptions) (RegistryEnvironmentContainersClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, environmentName, options) + if err != nil { + return RegistryEnvironmentContainersClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryEnvironmentContainersClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryEnvironmentContainersClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *RegistryEnvironmentContainersClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentContainersClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryEnvironmentContainersClient) getHandleResponse(resp *http.Response) (RegistryEnvironmentContainersClientGetResponse, error) { + result := RegistryEnvironmentContainersClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EnvironmentContainer); err != nil { + return RegistryEnvironmentContainersClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List environment containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - options - RegistryEnvironmentContainersClientListOptions contains the optional parameters for the RegistryEnvironmentContainersClient.NewListPager +// method. 
+func (client *RegistryEnvironmentContainersClient) NewListPager(resourceGroupName string, registryName string, options *RegistryEnvironmentContainersClientListOptions) *runtime.Pager[RegistryEnvironmentContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryEnvironmentContainersClientListResponse]{ + More: func(page RegistryEnvironmentContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryEnvironmentContainersClientListResponse) (RegistryEnvironmentContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryEnvironmentContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryEnvironmentContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryEnvironmentContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryEnvironmentContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistryEnvironmentContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryEnvironmentContainersClient) listHandleResponse(resp *http.Response) (RegistryEnvironmentContainersClientListResponse, error) { + result := RegistryEnvironmentContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EnvironmentContainerResourceArmPaginatedResult); err != nil { + return RegistryEnvironmentContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registryenvironmentversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registryenvironmentversions_client.go new file mode 100644 index 000000000000..8fc31acf6cd3 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registryenvironmentversions_client.go @@ -0,0 +1,359 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// RegistryEnvironmentVersionsClient contains the methods for the RegistryEnvironmentVersions group. +// Don't use this type directly, use NewRegistryEnvironmentVersionsClient() instead. +type RegistryEnvironmentVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryEnvironmentVersionsClient creates a new instance of RegistryEnvironmentVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryEnvironmentVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryEnvironmentVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryEnvironmentVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryEnvironmentVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. +// - version - Version identifier. +// - body - Version entity to create or update. +// - options - RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.BeginCreateOrUpdate +// method. 
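A short fragment sketching how a version body might be built for the BeginCreateOrUpdate implementation below, assuming a previously constructed RegistryEnvironmentVersionsClient. The EnvironmentVersionProperties fields (Image, Description) and the image reference are assumptions, not taken from this diff; polling follows the same pattern as the earlier sketches.

```go
body := armmachinelearning.EnvironmentVersion{
	Properties: &armmachinelearning.EnvironmentVersionProperties{
		// Image and Description are assumed model fields; consult the module's models for the exact shape.
		Image:       to.Ptr("mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04:latest"),
		Description: to.Ptr("example environment version"),
	},
}
poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<registry-name>", "<environment-name>", "1", body, nil)
if err != nil {
	log.Fatalf("BeginCreateOrUpdate failed: %v", err)
}
if _, err := poller.PollUntilDone(ctx, nil); err != nil {
	log.Fatalf("polling failed: %v", err)
}
```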
+func (client *RegistryEnvironmentVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, body EnvironmentVersion, options *RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryEnvironmentVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, environmentName, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryEnvironmentVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryEnvironmentVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryEnvironmentVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, body EnvironmentVersion, options *RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, environmentName, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
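// Editorial note: an illustrative sketch, not part of the generated file, showing how the
// long-running BeginCreateOrUpdate above is driven to completion with the azcore poller.
// The resource names are placeholders and the EnvironmentVersion payload is left empty here;
// client is a *RegistryEnvironmentVersionsClient built with NewRegistryEnvironmentVersionsClient.
//
//	poller, err := client.BeginCreateOrUpdate(context.TODO(), "my-rg", "my-registry", "my-env", "1",
//		armmachinelearning.EnvironmentVersion{ /* Properties elided */ }, nil)
//	if err != nil {
//		// handle error
//	}
//	resp, err := poller.PollUntilDone(context.TODO(), nil)
//	if err != nil {
//		// handle error
//	}
//	_ = resp // RegistryEnvironmentVersionsClientCreateOrUpdateResponse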
+func (client *RegistryEnvironmentVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, body EnvironmentVersion, options *RegistryEnvironmentVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. +// - version - Version identifier. +// - options - RegistryEnvironmentVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.BeginDelete +// method. +func (client *RegistryEnvironmentVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, options *RegistryEnvironmentVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryEnvironmentVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, environmentName, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryEnvironmentVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryEnvironmentVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *RegistryEnvironmentVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, options *RegistryEnvironmentVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, environmentName, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryEnvironmentVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, options *RegistryEnvironmentVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - RegistryEnvironmentVersionsClientGetOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.Get +// method. 
+func (client *RegistryEnvironmentVersionsClient) Get(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, options *RegistryEnvironmentVersionsClientGetOptions) (RegistryEnvironmentVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, environmentName, version, options) + if err != nil { + return RegistryEnvironmentVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryEnvironmentVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryEnvironmentVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryEnvironmentVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, version string, options *RegistryEnvironmentVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryEnvironmentVersionsClient) getHandleResponse(resp *http.Response) (RegistryEnvironmentVersionsClientGetResponse, error) { + result := RegistryEnvironmentVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EnvironmentVersion); err != nil { + return RegistryEnvironmentVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List versions. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - environmentName - Container name. This is case-sensitive. +// - options - RegistryEnvironmentVersionsClientListOptions contains the optional parameters for the RegistryEnvironmentVersionsClient.NewListPager +// method. 
+func (client *RegistryEnvironmentVersionsClient) NewListPager(resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentVersionsClientListOptions) *runtime.Pager[RegistryEnvironmentVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryEnvironmentVersionsClientListResponse]{ + More: func(page RegistryEnvironmentVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryEnvironmentVersionsClientListResponse) (RegistryEnvironmentVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, environmentName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryEnvironmentVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryEnvironmentVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryEnvironmentVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryEnvironmentVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, environmentName string, options *RegistryEnvironmentVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if environmentName == "" { + return nil, errors.New("parameter environmentName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{environmentName}", url.PathEscape(environmentName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.OrderBy != nil { + reqQP.Set("$orderBy", *options.OrderBy) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + if options != nil && options.Stage != nil { + reqQP.Set("stage", *options.Stage) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
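// Editorial note: an illustrative sketch, not part of the generated file. Because
// listCreateRequest above forwards the optional $orderBy, $top, $skip, listViewType, and
// stage query parameters, the pager can be narrowed through the options struct. Field names
// are taken from the options usage above; Top is assumed to be *int32 given the int64
// conversion, to.Ptr is the helper from the azcore/to package, and the values shown are
// placeholders.
//
//	pager := client.NewListPager("my-rg", "my-registry", "my-env",
//		&armmachinelearning.RegistryEnvironmentVersionsClientListOptions{
//			OrderBy: to.Ptr("createdtime desc"),
//			Top:     to.Ptr(int32(10)),
//		})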
+func (client *RegistryEnvironmentVersionsClient) listHandleResponse(resp *http.Response) (RegistryEnvironmentVersionsClientListResponse, error) { + result := RegistryEnvironmentVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EnvironmentVersionResourceArmPaginatedResult); err != nil { + return RegistryEnvironmentVersionsClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrymodelcontainers_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrymodelcontainers_client.go new file mode 100644 index 000000000000..e3b6ee743915 --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrymodelcontainers_client.go @@ -0,0 +1,329 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RegistryModelContainersClient contains the methods for the RegistryModelContainers group. +// Don't use this type directly, use NewRegistryModelContainersClient() instead. +type RegistryModelContainersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryModelContainersClient creates a new instance of RegistryModelContainersClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryModelContainersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryModelContainersClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryModelContainersClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryModelContainersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update model container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. +// - body - Container entity to create or update. +// - options - RegistryModelContainersClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryModelContainersClient.BeginCreateOrUpdate +// method. 
+func (client *RegistryModelContainersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, modelName string, body ModelContainer, options *RegistryModelContainersClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryModelContainersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, modelName, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryModelContainersClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryModelContainersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update model container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryModelContainersClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, modelName string, body ModelContainer, options *RegistryModelContainersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, modelName, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RegistryModelContainersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, body ModelContainer, options *RegistryModelContainersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. +// - options - RegistryModelContainersClientBeginDeleteOptions contains the optional parameters for the RegistryModelContainersClient.BeginDelete +// method. +func (client *RegistryModelContainersClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, modelName string, options *RegistryModelContainersClientBeginDeleteOptions) (*runtime.Poller[RegistryModelContainersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, modelName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryModelContainersClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryModelContainersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryModelContainersClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, modelName string, options *RegistryModelContainersClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, modelName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
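// Editorial note: an illustrative sketch, not part of the generated file. BeginDelete above
// consults options.ResumeToken, so a delete that was started earlier can be rehydrated into a
// new poller, for example after a process restart. client is a *RegistryModelContainersClient,
// the names are placeholders, and persisting the token between runs is left to the caller.
//
//	poller, err := client.BeginDelete(context.TODO(), "my-rg", "my-registry", "my-model", nil)
//	if err != nil {
//		// handle error
//	}
//	token, err := poller.ResumeToken() // save this string somewhere durable
//	if err != nil {
//		// handle error
//	}
//	// later, possibly in another process:
//	poller, err = client.BeginDelete(context.TODO(), "my-rg", "my-registry", "my-model",
//		&armmachinelearning.RegistryModelContainersClientBeginDeleteOptions{ResumeToken: token})
//	if err != nil {
//		// handle error
//	}
//	_, err = poller.PollUntilDone(context.TODO(), nil)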
+func (client *RegistryModelContainersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, options *RegistryModelContainersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. This is case-sensitive. +// - options - RegistryModelContainersClientGetOptions contains the optional parameters for the RegistryModelContainersClient.Get +// method. +func (client *RegistryModelContainersClient) Get(ctx context.Context, resourceGroupName string, registryName string, modelName string, options *RegistryModelContainersClientGetOptions) (RegistryModelContainersClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, modelName, options) + if err != nil { + return RegistryModelContainersClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryModelContainersClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryModelContainersClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *RegistryModelContainersClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, options *RegistryModelContainersClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryModelContainersClient) getHandleResponse(resp *http.Response) (RegistryModelContainersClientGetResponse, error) { + result := RegistryModelContainersClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ModelContainer); err != nil { + return RegistryModelContainersClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List model containers. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - options - RegistryModelContainersClientListOptions contains the optional parameters for the RegistryModelContainersClient.NewListPager +// method. 
+func (client *RegistryModelContainersClient) NewListPager(resourceGroupName string, registryName string, options *RegistryModelContainersClientListOptions) *runtime.Pager[RegistryModelContainersClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryModelContainersClientListResponse]{ + More: func(page RegistryModelContainersClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryModelContainersClientListResponse) (RegistryModelContainersClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryModelContainersClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryModelContainersClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryModelContainersClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryModelContainersClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *RegistryModelContainersClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *RegistryModelContainersClient) listHandleResponse(resp *http.Response) (RegistryModelContainersClientListResponse, error) { + result := RegistryModelContainersClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ModelContainerResourceArmPaginatedResult); err != nil { + return RegistryModelContainersClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/registrymodelversions_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/registrymodelversions_client.go new file mode 100644 index 000000000000..c0739437148f --- /dev/null +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/registrymodelversions_client.go @@ -0,0 +1,515 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armmachinelearning + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// RegistryModelVersionsClient contains the methods for the RegistryModelVersions group. +// Don't use this type directly, use NewRegistryModelVersionsClient() instead. +type RegistryModelVersionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewRegistryModelVersionsClient creates a new instance of RegistryModelVersionsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewRegistryModelVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistryModelVersionsClient, error) { + cl, err := arm.NewClient(moduleName+".RegistryModelVersionsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &RegistryModelVersionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrGetStartPendingUpload - Generate a storage location and credential for the client to upload a model asset to. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Model name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Pending upload request object +// - options - RegistryModelVersionsClientCreateOrGetStartPendingUploadOptions contains the optional parameters for the RegistryModelVersionsClient.CreateOrGetStartPendingUpload +// method. 
+func (client *RegistryModelVersionsClient) CreateOrGetStartPendingUpload(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body PendingUploadRequestDto, options *RegistryModelVersionsClientCreateOrGetStartPendingUploadOptions) (RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse, error) { + req, err := client.createOrGetStartPendingUploadCreateRequest(ctx, resourceGroupName, registryName, modelName, version, body, options) + if err != nil { + return RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse{}, runtime.NewResponseError(resp) + } + return client.createOrGetStartPendingUploadHandleResponse(resp) +} + +// createOrGetStartPendingUploadCreateRequest creates the CreateOrGetStartPendingUpload request. +func (client *RegistryModelVersionsClient) createOrGetStartPendingUploadCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body PendingUploadRequestDto, options *RegistryModelVersionsClientCreateOrGetStartPendingUploadOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/startPendingUpload" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// createOrGetStartPendingUploadHandleResponse handles the CreateOrGetStartPendingUpload response. 
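// Editorial note: an illustrative sketch, not part of the generated file, for the
// CreateOrGetStartPendingUpload call above, which hands back a storage location and
// credential for uploading a model asset. The PendingUploadRequestDto fields are elided
// because they do not appear in this diff; client is a *RegistryModelVersionsClient and the
// names are placeholders.
//
//	resp, err := client.CreateOrGetStartPendingUpload(context.TODO(), "my-rg", "my-registry",
//		"my-model", "1", armmachinelearning.PendingUploadRequestDto{ /* fields elided */ }, nil)
//	if err != nil {
//		// handle error
//	}
//	_ = resp.PendingUploadResponseDto // embedded result, as unmarshalled by the handler below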
+func (client *RegistryModelVersionsClient) createOrGetStartPendingUploadHandleResponse(resp *http.Response) (RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse, error) { + result := RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PendingUploadResponseDto); err != nil { + return RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse{}, err + } + return result, nil +} + +// BeginCreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. +// - version - Version identifier. +// - body - Version entity to create or update. +// - options - RegistryModelVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the RegistryModelVersionsClient.BeginCreateOrUpdate +// method. +func (client *RegistryModelVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body ModelVersion, options *RegistryModelVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[RegistryModelVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, registryName, modelName, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryModelVersionsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryModelVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create or update version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryModelVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body ModelVersion, options *RegistryModelVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, registryName, modelName, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *RegistryModelVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body ModelVersion, options *RegistryModelVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// BeginDelete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. +// - version - Version identifier. +// - options - RegistryModelVersionsClientBeginDeleteOptions contains the optional parameters for the RegistryModelVersionsClient.BeginDelete +// method. +func (client *RegistryModelVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, options *RegistryModelVersionsClientBeginDeleteOptions) (*runtime.Poller[RegistryModelVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, registryName, modelName, version, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryModelVersionsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryModelVersionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-06-01-preview +func (client *RegistryModelVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, options *RegistryModelVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, registryName, modelName, version, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RegistryModelVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, options *RegistryModelVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get version. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - options - RegistryModelVersionsClientGetOptions contains the optional parameters for the RegistryModelVersionsClient.Get +// method. 
+func (client *RegistryModelVersionsClient) Get(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, options *RegistryModelVersionsClientGetOptions) (RegistryModelVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, registryName, modelName, version, options) + if err != nil { + return RegistryModelVersionsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryModelVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryModelVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RegistryModelVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, options *RegistryModelVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RegistryModelVersionsClient) getHandleResponse(resp *http.Response) (RegistryModelVersionsClientGetResponse, error) { + result := RegistryModelVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ModelVersion); err != nil { + return RegistryModelVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List versions. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. This is case-sensitive. +// - options - RegistryModelVersionsClientListOptions contains the optional parameters for the RegistryModelVersionsClient.NewListPager +// method. 
+func (client *RegistryModelVersionsClient) NewListPager(resourceGroupName string, registryName string, modelName string, options *RegistryModelVersionsClientListOptions) *runtime.Pager[RegistryModelVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[RegistryModelVersionsClientListResponse]{ + More: func(page RegistryModelVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RegistryModelVersionsClientListResponse) (RegistryModelVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, registryName, modelName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RegistryModelVersionsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RegistryModelVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RegistryModelVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *RegistryModelVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, options *RegistryModelVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", *options.Skip) + } + if options != nil && options.OrderBy != nil { + reqQP.Set("$orderBy", *options.OrderBy) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Version != nil { + reqQP.Set("version", *options.Version) + } + if options != nil && options.Description != nil { + reqQP.Set("description", *options.Description) + } + if options != nil && options.Tags != nil { + reqQP.Set("tags", *options.Tags) + } + if options != nil && options.Properties != nil { + reqQP.Set("properties", *options.Properties) + } + if options != nil && options.ListViewType != nil { + reqQP.Set("listViewType", string(*options.ListViewType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = 
[]string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *RegistryModelVersionsClient) listHandleResponse(resp *http.Response) (RegistryModelVersionsClientListResponse, error) { + result := RegistryModelVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ModelVersionResourceArmPaginatedResult); err != nil { + return RegistryModelVersionsClientListResponse{}, err + } + return result, nil +} + +// BeginPackage - Model Version Package operation. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - registryName - Name of Azure Machine Learning registry. This is case-insensitive +// - modelName - Container name. This is case-sensitive. +// - version - Version identifier. This is case-sensitive. +// - body - Package operation request body. +// - options - RegistryModelVersionsClientBeginPackageOptions contains the optional parameters for the RegistryModelVersionsClient.BeginPackage +// method. +func (client *RegistryModelVersionsClient) BeginPackage(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body PackageRequest, options *RegistryModelVersionsClientBeginPackageOptions) (*runtime.Poller[RegistryModelVersionsClientPackageResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.packageOperation(ctx, resourceGroupName, registryName, modelName, version, body, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RegistryModelVersionsClientPackageResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[RegistryModelVersionsClientPackageResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Package - Model Version Package operation. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +func (client *RegistryModelVersionsClient) packageOperation(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body PackageRequest, options *RegistryModelVersionsClientBeginPackageOptions) (*http.Response, error) { + req, err := client.packageCreateRequest(ctx, resourceGroupName, registryName, modelName, version, body, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// packageCreateRequest creates the Package request. 
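// Editorial note: an illustrative sketch, not part of the generated file, for the
// BeginPackage operation declared above. PackageRequest fields are elided because they are
// not shown in this diff; client is a *RegistryModelVersionsClient and the names are
// placeholders.
//
//	poller, err := client.BeginPackage(context.TODO(), "my-rg", "my-registry", "my-model", "1",
//		armmachinelearning.PackageRequest{ /* fields elided */ }, nil)
//	if err != nil {
//		// handle error
//	}
//	pkgResp, err := poller.PollUntilDone(context.TODO(), nil)
//	if err != nil {
//		// handle error
//	}
//	_ = pkgResp // RegistryModelVersionsClientPackageResponse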
+func (client *RegistryModelVersionsClient) packageCreateRequest(ctx context.Context, resourceGroupName string, registryName string, modelName string, version string, body PackageRequest, options *RegistryModelVersionsClientBeginPackageOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/package" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if registryName == "" { + return nil, errors.New("parameter registryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{registryName}", url.PathEscape(registryName)) + if modelName == "" { + return nil, errors.New("parameter modelName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{modelName}", url.PathEscape(modelName)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/response_types.go b/sdk/resourcemanager/machinelearning/armmachinelearning/response_types.go index 4f441de7c293..3d4aba195b45 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/response_types.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/response_types.go @@ -84,6 +84,11 @@ type CodeContainersClientListResponse struct { CodeContainerResourceArmPaginatedResult } +// CodeVersionsClientCreateOrGetStartPendingUploadResponse contains the response from method CodeVersionsClient.CreateOrGetStartPendingUpload. +type CodeVersionsClientCreateOrGetStartPendingUploadResponse struct { + PendingUploadResponseDto +} + // CodeVersionsClientCreateOrUpdateResponse contains the response from method CodeVersionsClient.CreateOrUpdate. type CodeVersionsClientCreateOrUpdateResponse struct { CodeVersion @@ -199,6 +204,16 @@ type ComputeClientStopResponse struct { // placeholder for future response values } +// ComputeClientUpdateCustomServicesResponse contains the response from method ComputeClient.UpdateCustomServices. +type ComputeClientUpdateCustomServicesResponse struct { + // placeholder for future response values +} + +// ComputeClientUpdateIdleShutdownSettingResponse contains the response from method ComputeClient.UpdateIdleShutdownSetting. +type ComputeClientUpdateIdleShutdownSettingResponse struct { + // placeholder for future response values +} + // ComputeClientUpdateResponse contains the response from method ComputeClient.BeginUpdate. 
type ComputeClientUpdateResponse struct { ComputeResource @@ -319,6 +334,106 @@ type EnvironmentVersionsClientListResponse struct { EnvironmentVersionResourceArmPaginatedResult } +// FeaturesClientGetResponse contains the response from method FeaturesClient.Get. +type FeaturesClientGetResponse struct { + Feature +} + +// FeaturesClientListResponse contains the response from method FeaturesClient.NewListPager. +type FeaturesClientListResponse struct { + FeatureResourceArmPaginatedResult +} + +// FeaturesetContainersClientCreateOrUpdateResponse contains the response from method FeaturesetContainersClient.BeginCreateOrUpdate. +type FeaturesetContainersClientCreateOrUpdateResponse struct { + FeaturesetContainer +} + +// FeaturesetContainersClientDeleteResponse contains the response from method FeaturesetContainersClient.BeginDelete. +type FeaturesetContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// FeaturesetContainersClientGetEntityResponse contains the response from method FeaturesetContainersClient.GetEntity. +type FeaturesetContainersClientGetEntityResponse struct { + FeaturesetContainer +} + +// FeaturesetContainersClientListResponse contains the response from method FeaturesetContainersClient.NewListPager. +type FeaturesetContainersClientListResponse struct { + FeaturesetContainerResourceArmPaginatedResult +} + +// FeaturesetVersionsClientBackfillResponse contains the response from method FeaturesetVersionsClient.BeginBackfill. +type FeaturesetVersionsClientBackfillResponse struct { + FeaturesetJob +} + +// FeaturesetVersionsClientCreateOrUpdateResponse contains the response from method FeaturesetVersionsClient.BeginCreateOrUpdate. +type FeaturesetVersionsClientCreateOrUpdateResponse struct { + FeaturesetVersion +} + +// FeaturesetVersionsClientDeleteResponse contains the response from method FeaturesetVersionsClient.BeginDelete. +type FeaturesetVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// FeaturesetVersionsClientGetResponse contains the response from method FeaturesetVersionsClient.Get. +type FeaturesetVersionsClientGetResponse struct { + FeaturesetVersion +} + +// FeaturesetVersionsClientListMaterializationJobsResponse contains the response from method FeaturesetVersionsClient.NewListMaterializationJobsPager. +type FeaturesetVersionsClientListMaterializationJobsResponse struct { + FeaturesetJobArmPaginatedResult +} + +// FeaturesetVersionsClientListResponse contains the response from method FeaturesetVersionsClient.NewListPager. +type FeaturesetVersionsClientListResponse struct { + FeaturesetVersionResourceArmPaginatedResult +} + +// FeaturestoreEntityContainersClientCreateOrUpdateResponse contains the response from method FeaturestoreEntityContainersClient.BeginCreateOrUpdate. +type FeaturestoreEntityContainersClientCreateOrUpdateResponse struct { + FeaturestoreEntityContainer +} + +// FeaturestoreEntityContainersClientDeleteResponse contains the response from method FeaturestoreEntityContainersClient.BeginDelete. +type FeaturestoreEntityContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// FeaturestoreEntityContainersClientGetEntityResponse contains the response from method FeaturestoreEntityContainersClient.GetEntity. +type FeaturestoreEntityContainersClientGetEntityResponse struct { + FeaturestoreEntityContainer +} + +// FeaturestoreEntityContainersClientListResponse contains the response from method FeaturestoreEntityContainersClient.NewListPager. 
+type FeaturestoreEntityContainersClientListResponse struct { + FeaturestoreEntityContainerResourceArmPaginatedResult +} + +// FeaturestoreEntityVersionsClientCreateOrUpdateResponse contains the response from method FeaturestoreEntityVersionsClient.BeginCreateOrUpdate. +type FeaturestoreEntityVersionsClientCreateOrUpdateResponse struct { + FeaturestoreEntityVersion +} + +// FeaturestoreEntityVersionsClientDeleteResponse contains the response from method FeaturestoreEntityVersionsClient.BeginDelete. +type FeaturestoreEntityVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// FeaturestoreEntityVersionsClientGetResponse contains the response from method FeaturestoreEntityVersionsClient.Get. +type FeaturestoreEntityVersionsClientGetResponse struct { + FeaturestoreEntityVersion +} + +// FeaturestoreEntityVersionsClientListResponse contains the response from method FeaturestoreEntityVersionsClient.NewListPager. +type FeaturestoreEntityVersionsClientListResponse struct { + FeaturestoreEntityVersionResourceArmPaginatedResult +} + // JobsClientCancelResponse contains the response from method JobsClient.BeginCancel. type JobsClientCancelResponse struct { // placeholder for future response values @@ -344,6 +459,81 @@ type JobsClientListResponse struct { JobBaseResourceArmPaginatedResult } +// JobsClientUpdateResponse contains the response from method JobsClient.Update. +type JobsClientUpdateResponse struct { + JobBase +} + +// LabelingJobsClientCreateOrUpdateResponse contains the response from method LabelingJobsClient.BeginCreateOrUpdate. +type LabelingJobsClientCreateOrUpdateResponse struct { + LabelingJob +} + +// LabelingJobsClientDeleteResponse contains the response from method LabelingJobsClient.Delete. +type LabelingJobsClientDeleteResponse struct { + // placeholder for future response values +} + +// LabelingJobsClientExportLabelsResponse contains the response from method LabelingJobsClient.BeginExportLabels. +type LabelingJobsClientExportLabelsResponse struct { + ExportSummaryClassification +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LabelingJobsClientExportLabelsResponse. +func (l *LabelingJobsClientExportLabelsResponse) UnmarshalJSON(data []byte) error { + res, err := unmarshalExportSummaryClassification(data) + if err != nil { + return err + } + l.ExportSummaryClassification = res + return nil +} + +// LabelingJobsClientGetResponse contains the response from method LabelingJobsClient.Get. +type LabelingJobsClientGetResponse struct { + LabelingJob +} + +// LabelingJobsClientListResponse contains the response from method LabelingJobsClient.NewListPager. +type LabelingJobsClientListResponse struct { + LabelingJobResourceArmPaginatedResult +} + +// LabelingJobsClientPauseResponse contains the response from method LabelingJobsClient.Pause. +type LabelingJobsClientPauseResponse struct { + // placeholder for future response values +} + +// LabelingJobsClientResumeResponse contains the response from method LabelingJobsClient.BeginResume. +type LabelingJobsClientResumeResponse struct { + // placeholder for future response values +} + +// ManagedNetworkProvisionsClientProvisionManagedNetworkResponse contains the response from method ManagedNetworkProvisionsClient.BeginProvisionManagedNetwork. 
+type ManagedNetworkProvisionsClientProvisionManagedNetworkResponse struct { + ManagedNetworkProvisionStatus +} + +// ManagedNetworkSettingsRuleClientCreateOrUpdateResponse contains the response from method ManagedNetworkSettingsRuleClient.BeginCreateOrUpdate. +type ManagedNetworkSettingsRuleClientCreateOrUpdateResponse struct { + OutboundRuleBasicResource +} + +// ManagedNetworkSettingsRuleClientDeleteResponse contains the response from method ManagedNetworkSettingsRuleClient.BeginDelete. +type ManagedNetworkSettingsRuleClientDeleteResponse struct { + // placeholder for future response values +} + +// ManagedNetworkSettingsRuleClientGetResponse contains the response from method ManagedNetworkSettingsRuleClient.Get. +type ManagedNetworkSettingsRuleClientGetResponse struct { + OutboundRuleBasicResource +} + +// ManagedNetworkSettingsRuleClientListResponse contains the response from method ManagedNetworkSettingsRuleClient.NewListPager. +type ManagedNetworkSettingsRuleClientListResponse struct { + OutboundRuleListResult +} + // ModelContainersClientCreateOrUpdateResponse contains the response from method ModelContainersClient.CreateOrUpdate. type ModelContainersClientCreateOrUpdateResponse struct { ModelContainer @@ -384,6 +574,11 @@ type ModelVersionsClientListResponse struct { ModelVersionResourceArmPaginatedResult } +// ModelVersionsClientPackageResponse contains the response from method ModelVersionsClient.BeginPackage. +type ModelVersionsClientPackageResponse struct { + PackageResponse +} + // OnlineDeploymentsClientCreateOrUpdateResponse contains the response from method OnlineDeploymentsClient.BeginCreateOrUpdate. type OnlineDeploymentsClientCreateOrUpdateResponse struct { OnlineDeployment @@ -484,7 +679,7 @@ type PrivateEndpointConnectionsClientListResponse struct { PrivateEndpointConnectionListResult } -// PrivateLinkResourcesClientListResponse contains the response from method PrivateLinkResourcesClient.List. +// PrivateLinkResourcesClientListResponse contains the response from method PrivateLinkResourcesClient.NewListPager. type PrivateLinkResourcesClientListResponse struct { PrivateLinkResourceListResult } @@ -499,6 +694,261 @@ type QuotasClientUpdateResponse struct { UpdateWorkspaceQuotasResult } +// RegistriesClientCreateOrUpdateResponse contains the response from method RegistriesClient.BeginCreateOrUpdate. +type RegistriesClientCreateOrUpdateResponse struct { + Registry +} + +// RegistriesClientDeleteResponse contains the response from method RegistriesClient.BeginDelete. +type RegistriesClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistriesClientGetResponse contains the response from method RegistriesClient.Get. +type RegistriesClientGetResponse struct { + Registry +} + +// RegistriesClientListBySubscriptionResponse contains the response from method RegistriesClient.NewListBySubscriptionPager. +type RegistriesClientListBySubscriptionResponse struct { + RegistryTrackedResourceArmPaginatedResult +} + +// RegistriesClientListResponse contains the response from method RegistriesClient.NewListPager. +type RegistriesClientListResponse struct { + RegistryTrackedResourceArmPaginatedResult +} + +// RegistriesClientRemoveRegionsResponse contains the response from method RegistriesClient.BeginRemoveRegions. +type RegistriesClientRemoveRegionsResponse struct { + Registry +} + +// RegistriesClientUpdateResponse contains the response from method RegistriesClient.Update. 
+type RegistriesClientUpdateResponse struct { + Registry +} + +// RegistryCodeContainersClientCreateOrUpdateResponse contains the response from method RegistryCodeContainersClient.BeginCreateOrUpdate. +type RegistryCodeContainersClientCreateOrUpdateResponse struct { + CodeContainer +} + +// RegistryCodeContainersClientDeleteResponse contains the response from method RegistryCodeContainersClient.BeginDelete. +type RegistryCodeContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryCodeContainersClientGetResponse contains the response from method RegistryCodeContainersClient.Get. +type RegistryCodeContainersClientGetResponse struct { + CodeContainer +} + +// RegistryCodeContainersClientListResponse contains the response from method RegistryCodeContainersClient.NewListPager. +type RegistryCodeContainersClientListResponse struct { + CodeContainerResourceArmPaginatedResult +} + +// RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse contains the response from method RegistryCodeVersionsClient.CreateOrGetStartPendingUpload. +type RegistryCodeVersionsClientCreateOrGetStartPendingUploadResponse struct { + PendingUploadResponseDto +} + +// RegistryCodeVersionsClientCreateOrUpdateResponse contains the response from method RegistryCodeVersionsClient.BeginCreateOrUpdate. +type RegistryCodeVersionsClientCreateOrUpdateResponse struct { + CodeVersion +} + +// RegistryCodeVersionsClientDeleteResponse contains the response from method RegistryCodeVersionsClient.BeginDelete. +type RegistryCodeVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryCodeVersionsClientGetResponse contains the response from method RegistryCodeVersionsClient.Get. +type RegistryCodeVersionsClientGetResponse struct { + CodeVersion +} + +// RegistryCodeVersionsClientListResponse contains the response from method RegistryCodeVersionsClient.NewListPager. +type RegistryCodeVersionsClientListResponse struct { + CodeVersionResourceArmPaginatedResult +} + +// RegistryComponentContainersClientCreateOrUpdateResponse contains the response from method RegistryComponentContainersClient.BeginCreateOrUpdate. +type RegistryComponentContainersClientCreateOrUpdateResponse struct { + ComponentContainer +} + +// RegistryComponentContainersClientDeleteResponse contains the response from method RegistryComponentContainersClient.BeginDelete. +type RegistryComponentContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryComponentContainersClientGetResponse contains the response from method RegistryComponentContainersClient.Get. +type RegistryComponentContainersClientGetResponse struct { + ComponentContainer +} + +// RegistryComponentContainersClientListResponse contains the response from method RegistryComponentContainersClient.NewListPager. +type RegistryComponentContainersClientListResponse struct { + ComponentContainerResourceArmPaginatedResult +} + +// RegistryComponentVersionsClientCreateOrUpdateResponse contains the response from method RegistryComponentVersionsClient.BeginCreateOrUpdate. +type RegistryComponentVersionsClientCreateOrUpdateResponse struct { + ComponentVersion +} + +// RegistryComponentVersionsClientDeleteResponse contains the response from method RegistryComponentVersionsClient.BeginDelete. 
+type RegistryComponentVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryComponentVersionsClientGetResponse contains the response from method RegistryComponentVersionsClient.Get. +type RegistryComponentVersionsClientGetResponse struct { + ComponentVersion +} + +// RegistryComponentVersionsClientListResponse contains the response from method RegistryComponentVersionsClient.NewListPager. +type RegistryComponentVersionsClientListResponse struct { + ComponentVersionResourceArmPaginatedResult +} + +// RegistryDataContainersClientCreateOrUpdateResponse contains the response from method RegistryDataContainersClient.BeginCreateOrUpdate. +type RegistryDataContainersClientCreateOrUpdateResponse struct { + DataContainer +} + +// RegistryDataContainersClientDeleteResponse contains the response from method RegistryDataContainersClient.BeginDelete. +type RegistryDataContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryDataContainersClientGetResponse contains the response from method RegistryDataContainersClient.Get. +type RegistryDataContainersClientGetResponse struct { + DataContainer +} + +// RegistryDataContainersClientListResponse contains the response from method RegistryDataContainersClient.NewListPager. +type RegistryDataContainersClientListResponse struct { + DataContainerResourceArmPaginatedResult +} + +// RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse contains the response from method RegistryDataVersionsClient.CreateOrGetStartPendingUpload. +type RegistryDataVersionsClientCreateOrGetStartPendingUploadResponse struct { + PendingUploadResponseDto +} + +// RegistryDataVersionsClientCreateOrUpdateResponse contains the response from method RegistryDataVersionsClient.BeginCreateOrUpdate. +type RegistryDataVersionsClientCreateOrUpdateResponse struct { + DataVersionBase +} + +// RegistryDataVersionsClientDeleteResponse contains the response from method RegistryDataVersionsClient.BeginDelete. +type RegistryDataVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryDataVersionsClientGetResponse contains the response from method RegistryDataVersionsClient.Get. +type RegistryDataVersionsClientGetResponse struct { + DataVersionBase +} + +// RegistryDataVersionsClientListResponse contains the response from method RegistryDataVersionsClient.NewListPager. +type RegistryDataVersionsClientListResponse struct { + DataVersionBaseResourceArmPaginatedResult +} + +// RegistryEnvironmentContainersClientCreateOrUpdateResponse contains the response from method RegistryEnvironmentContainersClient.BeginCreateOrUpdate. +type RegistryEnvironmentContainersClientCreateOrUpdateResponse struct { + EnvironmentContainer +} + +// RegistryEnvironmentContainersClientDeleteResponse contains the response from method RegistryEnvironmentContainersClient.BeginDelete. +type RegistryEnvironmentContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryEnvironmentContainersClientGetResponse contains the response from method RegistryEnvironmentContainersClient.Get. +type RegistryEnvironmentContainersClientGetResponse struct { + EnvironmentContainer +} + +// RegistryEnvironmentContainersClientListResponse contains the response from method RegistryEnvironmentContainersClient.NewListPager. 
+type RegistryEnvironmentContainersClientListResponse struct { + EnvironmentContainerResourceArmPaginatedResult +} + +// RegistryEnvironmentVersionsClientCreateOrUpdateResponse contains the response from method RegistryEnvironmentVersionsClient.BeginCreateOrUpdate. +type RegistryEnvironmentVersionsClientCreateOrUpdateResponse struct { + EnvironmentVersion +} + +// RegistryEnvironmentVersionsClientDeleteResponse contains the response from method RegistryEnvironmentVersionsClient.BeginDelete. +type RegistryEnvironmentVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryEnvironmentVersionsClientGetResponse contains the response from method RegistryEnvironmentVersionsClient.Get. +type RegistryEnvironmentVersionsClientGetResponse struct { + EnvironmentVersion +} + +// RegistryEnvironmentVersionsClientListResponse contains the response from method RegistryEnvironmentVersionsClient.NewListPager. +type RegistryEnvironmentVersionsClientListResponse struct { + EnvironmentVersionResourceArmPaginatedResult +} + +// RegistryModelContainersClientCreateOrUpdateResponse contains the response from method RegistryModelContainersClient.BeginCreateOrUpdate. +type RegistryModelContainersClientCreateOrUpdateResponse struct { + ModelContainer +} + +// RegistryModelContainersClientDeleteResponse contains the response from method RegistryModelContainersClient.BeginDelete. +type RegistryModelContainersClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryModelContainersClientGetResponse contains the response from method RegistryModelContainersClient.Get. +type RegistryModelContainersClientGetResponse struct { + ModelContainer +} + +// RegistryModelContainersClientListResponse contains the response from method RegistryModelContainersClient.NewListPager. +type RegistryModelContainersClientListResponse struct { + ModelContainerResourceArmPaginatedResult +} + +// RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse contains the response from method RegistryModelVersionsClient.CreateOrGetStartPendingUpload. +type RegistryModelVersionsClientCreateOrGetStartPendingUploadResponse struct { + PendingUploadResponseDto +} + +// RegistryModelVersionsClientCreateOrUpdateResponse contains the response from method RegistryModelVersionsClient.BeginCreateOrUpdate. +type RegistryModelVersionsClientCreateOrUpdateResponse struct { + ModelVersion +} + +// RegistryModelVersionsClientDeleteResponse contains the response from method RegistryModelVersionsClient.BeginDelete. +type RegistryModelVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// RegistryModelVersionsClientGetResponse contains the response from method RegistryModelVersionsClient.Get. +type RegistryModelVersionsClientGetResponse struct { + ModelVersion +} + +// RegistryModelVersionsClientListResponse contains the response from method RegistryModelVersionsClient.NewListPager. +type RegistryModelVersionsClientListResponse struct { + ModelVersionResourceArmPaginatedResult +} + +// RegistryModelVersionsClientPackageResponse contains the response from method RegistryModelVersionsClient.BeginPackage. +type RegistryModelVersionsClientPackageResponse struct { + PackageResponse +} + // SchedulesClientCreateOrUpdateResponse contains the response from method SchedulesClient.BeginCreateOrUpdate. 
type SchedulesClientCreateOrUpdateResponse struct { Schedule @@ -549,6 +999,16 @@ type WorkspaceConnectionsClientListResponse struct { WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult } +// WorkspaceConnectionsClientListSecretsResponse contains the response from method WorkspaceConnectionsClient.ListSecrets. +type WorkspaceConnectionsClientListSecretsResponse struct { + WorkspaceConnectionPropertiesV2BasicResource +} + +// WorkspaceConnectionsClientUpdateResponse contains the response from method WorkspaceConnectionsClient.Update. +type WorkspaceConnectionsClientUpdateResponse struct { + WorkspaceConnectionPropertiesV2BasicResource +} + // WorkspaceFeaturesClientListResponse contains the response from method WorkspaceFeaturesClient.NewListPager. type WorkspaceFeaturesClientListResponse struct { ListAmlUserFeatureResult diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client.go index 9a49a581f556..870eee972013 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client.go @@ -47,7 +47,7 @@ func NewSchedulesClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Create or update schedule. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Schedule name. @@ -60,7 +60,9 @@ func (client *SchedulesClient) BeginCreateOrUpdate(ctx context.Context, resource if err != nil { return nil, err } - return runtime.NewPoller[SchedulesClientCreateOrUpdateResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SchedulesClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaOriginalURI, + }) } else { return runtime.NewPollerFromResumeToken[SchedulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -69,7 +71,7 @@ func (client *SchedulesClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Create or update schedule. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *SchedulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, name string, body Schedule, options *SchedulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, name, body, options) if err != nil { @@ -109,7 +111,7 @@ func (client *SchedulesClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, runtime.MarshalAsJSON(req, body) @@ -118,7 +120,7 @@ func (client *SchedulesClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Delete schedule. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Schedule name. @@ -129,7 +131,9 @@ func (client *SchedulesClient) BeginDelete(ctx context.Context, resourceGroupNam if err != nil { return nil, err } - return runtime.NewPoller[SchedulesClientDeleteResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SchedulesClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[SchedulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -138,7 +142,7 @@ func (client *SchedulesClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Delete schedule. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *SchedulesClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, name string, options *SchedulesClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, name, options) if err != nil { @@ -178,7 +182,7 @@ func (client *SchedulesClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -187,7 +191,7 @@ func (client *SchedulesClient) deleteCreateRequest(ctx context.Context, resource // Get - Get schedule. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - name - Schedule name. @@ -231,7 +235,7 @@ func (client *SchedulesClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -248,7 +252,7 @@ func (client *SchedulesClient) getHandleResponse(resp *http.Response) (Schedules // NewListPager - List schedules in specified workspace. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - SchedulesClientListOptions contains the optional parameters for the SchedulesClient.NewListPager method. 
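// NOTE (editorial, not part of the generated diff): the example tests for this client are removed
// further below, so a minimal usage sketch of the pager is kept here for reference. It assumes a
// clientFactory created via armmachinelearning.NewClientFactory and placeholder resource-group and
// workspace names; it mirrors the deleted ExampleSchedulesClient_NewListPager.
//
//	pager := clientFactory.NewSchedulesClient().NewListPager("test-rg", "my-aml-workspace",
//		&armmachinelearning.SchedulesClientListOptions{Skip: nil, ListViewType: nil})
//	for pager.More() {
//		page, err := pager.NextPage(ctx)
//		if err != nil {
//			log.Fatalf("failed to advance page: %v", err)
//		}
//		for _, schedule := range page.Value {
//			_ = schedule // each entry is a *armmachinelearning.Schedule
//		}
//	}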
@@ -300,7 +304,7 @@ func (client *SchedulesClient) listCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client_example_test.go deleted file mode 100644 index c6a64ae0532d..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/schedules_client_example_test.go +++ /dev/null @@ -1,253 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Schedule/list.json -func ExampleSchedulesClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewSchedulesClient().NewListPager("test-rg", "my-aml-workspace", &armmachinelearning.SchedulesClientListOptions{Skip: nil, - ListViewType: nil, - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.ScheduleResourceArmPaginatedResult = armmachinelearning.ScheduleResourceArmPaginatedResult{ - // Value: []*armmachinelearning.Schedule{ - // { - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeKey), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeApplication), - // }, - // Properties: &armmachinelearning.ScheduleProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Action: &armmachinelearning.EndpointScheduleAction{ - // ActionType: to.Ptr(armmachinelearning.ScheduleActionTypeInvokeBatchEndpoint), - // EndpointInvocationDefinition: map[string]any{ - // "00cd1396-a094-4d48-8d86-14c43a55a6af": nil, - // }, - // }, - // DisplayName: to.Ptr("string"), - // IsEnabled: to.Ptr(false), - // ProvisioningState: to.Ptr(armmachinelearning.ScheduleProvisioningStatusSucceeded), - // Trigger: &armmachinelearning.CronTrigger{ - // EndTime: to.Ptr("string"), - // StartTime: to.Ptr("string"), - // TimeZone: to.Ptr("string"), - // TriggerType: to.Ptr(armmachinelearning.TriggerTypeCron), - // Expression: to.Ptr("string"), - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Schedule/delete.json -func ExampleSchedulesClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewSchedulesClient().BeginDelete(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Schedule/get.json -func ExampleSchedulesClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewSchedulesClient().Get(ctx, "test-rg", "my-aml-workspace", "string", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. 
- _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Schedule = armmachinelearning.Schedule{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeKey), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeKey), - // }, - // Properties: &armmachinelearning.ScheduleProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Action: &armmachinelearning.EndpointScheduleAction{ - // ActionType: to.Ptr(armmachinelearning.ScheduleActionTypeInvokeBatchEndpoint), - // EndpointInvocationDefinition: map[string]any{ - // "a108545b-def1-4c86-8e53-dbcb1de3a8bc": nil, - // }, - // }, - // DisplayName: to.Ptr("string"), - // IsEnabled: to.Ptr(false), - // ProvisioningState: to.Ptr(armmachinelearning.ScheduleProvisioningStatusSucceeded), - // Trigger: &armmachinelearning.CronTrigger{ - // EndTime: to.Ptr("string"), - // StartTime: to.Ptr("string"), - // TimeZone: to.Ptr("string"), - // TriggerType: to.Ptr(armmachinelearning.TriggerTypeCron), - // Expression: to.Ptr("string"), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Schedule/createOrUpdate.json -func ExampleSchedulesClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewSchedulesClient().BeginCreateOrUpdate(ctx, "test-rg", "my-aml-workspace", "string", armmachinelearning.Schedule{ - Properties: &armmachinelearning.ScheduleProperties{ - Description: to.Ptr("string"), - Properties: map[string]*string{ - "string": to.Ptr("string"), - }, - Tags: map[string]*string{ - "string": to.Ptr("string"), - }, - Action: &armmachinelearning.EndpointScheduleAction{ - ActionType: to.Ptr(armmachinelearning.ScheduleActionTypeInvokeBatchEndpoint), - EndpointInvocationDefinition: map[string]any{ - "9965593e-526f-4b89-bb36-761138cf2794": nil, - }, - }, - DisplayName: to.Ptr("string"), - IsEnabled: to.Ptr(false), - Trigger: &armmachinelearning.CronTrigger{ - EndTime: to.Ptr("string"), - StartTime: to.Ptr("string"), - TimeZone: to.Ptr("string"), - TriggerType: to.Ptr(armmachinelearning.TriggerTypeCron), - Expression: to.Ptr("string"), - }, - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You 
could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Schedule = armmachinelearning.Schedule{ - // Name: to.Ptr("string"), - // Type: to.Ptr("string"), - // ID: to.Ptr("string"), - // SystemData: &armmachinelearning.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // CreatedBy: to.Ptr("string"), - // CreatedByType: to.Ptr(armmachinelearning.CreatedByTypeKey), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T12:34:56.999Z"); return t}()), - // LastModifiedBy: to.Ptr("string"), - // LastModifiedByType: to.Ptr(armmachinelearning.CreatedByTypeApplication), - // }, - // Properties: &armmachinelearning.ScheduleProperties{ - // Description: to.Ptr("string"), - // Properties: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Tags: map[string]*string{ - // "string": to.Ptr("string"), - // }, - // Action: &armmachinelearning.EndpointScheduleAction{ - // ActionType: to.Ptr(armmachinelearning.ScheduleActionTypeInvokeBatchEndpoint), - // EndpointInvocationDefinition: map[string]any{ - // "d77a9a9a-4bb5-4c0c-8a77-459be8b82b9f": nil, - // }, - // }, - // DisplayName: to.Ptr("string"), - // IsEnabled: to.Ptr(false), - // ProvisioningState: to.Ptr(armmachinelearning.ScheduleProvisioningStatusSucceeded), - // Trigger: &armmachinelearning.CronTrigger{ - // EndTime: to.Ptr("string"), - // StartTime: to.Ptr("string"), - // TimeZone: to.Ptr("string"), - // TriggerType: to.Ptr(armmachinelearning.TriggerTypeCron), - // Expression: to.Ptr("string"), - // }, - // }, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client.go index d1502a42de3e..e5e0ebd245d9 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client.go @@ -46,7 +46,7 @@ func NewUsagesClient(subscriptionID string, credential azcore.TokenCredential, o // NewListPager - Gets the current usage information as well as limits for AML resources for given subscription and location. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - location - The location for which resource usage is queried. // - options - UsagesClientListOptions contains the optional parameters for the UsagesClient.NewListPager method. 
func (client *UsagesClient) NewListPager(location string, options *UsagesClientListOptions) *runtime.Pager[UsagesClientListResponse] { @@ -93,7 +93,7 @@ func (client *UsagesClient) listCreateRequest(ctx context.Context, location stri return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client_example_test.go deleted file mode 100644 index e3b70a9c0454..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/usages_client_example_test.go +++ /dev/null @@ -1,431 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Usage/list.json -func ExampleUsagesClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewUsagesClient().NewListPager("eastus", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.ListUsagesResult = armmachinelearning.ListUsagesResult{ - // Value: []*armmachinelearning.Usage{ - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Clusters"), - // Value: to.Ptr("Clusters"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/totalCores/usages"), - // CurrentValue: to.Ptr[int64](7), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages"), - // Limit: to.Ptr[int64](100), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Total Cluster Dedicated Regional vCPUs"), - // Value: to.Ptr("Total Cluster Dedicated Regional vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/dedicatedCores/usages"), - // CurrentValue: to.Ptr[int64](14), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard D Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_D_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](48), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](2), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/usages"), - // CurrentValue: to.Ptr[int64](2), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/usages/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes/usages"), - // CurrentValue: to.Ptr[int64](2), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/computes/demo_cluster1_dsv2/usages/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: 
to.Ptr("Microsoft.MachineLearningServices/workspaces/computes/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/computes/demo_cluster2_dsv2/usages/Standard_DSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard Dv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_Dv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard FSv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_FSv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](12), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/usages/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/computes/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/computes/demo_cluster1_nc/usages/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/usages"), - // 
CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/usages/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/computes/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/computes/demo_cluser1_nc/usages/Standard_NC_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NCv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NCv3 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NCv3_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard ND Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_ND_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NDv2 Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NDv2_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](0), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // Value: to.Ptr("Standard NV Family Cluster Dedicated vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NV_Family_Cluster_Dedicated_vCPUs"), - // Limit: to.Ptr[int64](24), - // 
Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Total Cluster LowPriority Regional vCPUs"), - // Value: to.Ptr("Total Cluster LowPriority Regional vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/lowPriorityCores/usages"), - // CurrentValue: to.Ptr[int64](18), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages"), - // Limit: to.Ptr[int64](50), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard D Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard D Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_D_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard DSv2 Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard DSv2 Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_DSv2_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard Dv2 Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard Dv2 Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_Dv2_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard FSv2 Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard FSv2 Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_FSv2_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](18), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NC_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/usages/Standard_NC_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/computes/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace1/computes/demo_cluster1_lowPriority_nc/usages/Standard_NC_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/usages"), - // CurrentValue: to.Ptr[int64](12), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/usages/Standard_NC_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/computes/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/computes/demo_cluster2_lowPriority_nc/usages/Standard_NC_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NC Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/workspace/computes/usages"), - // CurrentValue: to.Ptr[int64](6), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.MachineLearningServices/workspaces/demo_workspace2/computes/demo_cluster3_lowPriority_nc/usages/Standard_NC_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NCv2 Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NCv2 Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NCv2_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NCv3 Family Cluster 
LowPriority vCPUs"), - // Value: to.Ptr("Standard NCv3 Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NCv3_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard ND Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard ND Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_ND_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NDv2 Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NDv2 Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NDv2_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }, - // { - // Name: &armmachinelearning.UsageName{ - // LocalizedValue: to.Ptr("Standard NV Family Cluster LowPriority vCPUs"), - // Value: to.Ptr("Standard NV Family Cluster LowPriority vCPUs"), - // }, - // Type: to.Ptr("Microsoft.MachineLearningServices/vmFamily/usages"), - // CurrentValue: to.Ptr[int64](0), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/usages/Standard_NV_Family_Cluster_LowPriority_vCPUs"), - // Limit: to.Ptr[int64](-1), - // Unit: to.Ptr(armmachinelearning.UsageUnitCount), - // }}, - // } - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client.go index 153c5e856b4d..3977714c2367 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineSizesClient(subscriptionID string, credential azcore.Token // List - Returns supported VM Sizes in a location // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - location - The location upon which virtual-machine-sizes is queried. // - options - VirtualMachineSizesClientListOptions contains the optional parameters for the VirtualMachineSizesClient.List // method. 
@@ -82,7 +82,7 @@ func (client *VirtualMachineSizesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client_example_test.go deleted file mode 100644 index 4406ae32cdef..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/virtualmachinesizes_client_example_test.go +++ /dev/null @@ -1,396 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/VirtualMachineSize/list.json -func ExampleVirtualMachineSizesClient_List() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewVirtualMachineSizesClient().List(ctx, "eastus", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.VirtualMachineSizeListResult = armmachinelearning.VirtualMachineSizeListResult{ - // Value: []*armmachinelearning.VirtualMachineSize{ - // { - // Name: to.Ptr("Standard_DS1_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.13), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.01), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.07), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.05), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](7168), - // MemoryGB: to.Ptr[float64](3.5), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance")}, - // VCPUs: to.Ptr[int32](1), - // }, - // { - // Name: to.Ptr("Standard_DS2_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.03), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.15), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.1), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.25), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](14336), - // MemoryGB: to.Ptr[float64](7), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance"), - // to.Ptr("MIR")}, - // VCPUs: to.Ptr[int32](2), - // }, - // { - // Name: to.Ptr("Standard_DS3_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.2), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: 
to.Ptr[float64](0.06), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.5), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.29), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](28672), - // MemoryGB: to.Ptr[float64](14), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance"), - // to.Ptr("MIR")}, - // VCPUs: to.Ptr[int32](4), - // }, - // { - // Name: to.Ptr("Standard_DS4_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.12), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.4), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](1.01), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.58), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](57344), - // MemoryGB: to.Ptr[float64](28), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance"), - // to.Ptr("MIR")}, - // VCPUs: to.Ptr[int32](8), - // }, - // { - // Name: to.Ptr("Standard_DS5_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](1.17), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.81), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](2.02), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.23), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](114688), - // MemoryGB: to.Ptr[float64](56), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // 
PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance"), - // to.Ptr("MIR")}, - // VCPUs: to.Ptr[int32](16), - // }, - // { - // Name: to.Ptr("Standard_DS11_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.26), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.18), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.11), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.04), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](28672), - // MemoryGB: to.Ptr[float64](14), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance")}, - // VCPUs: to.Ptr[int32](2), - // }, - // { - // Name: to.Ptr("Standard_DS12_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.37), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.53), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.21), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.07), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](57344), - // MemoryGB: to.Ptr[float64](28), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance")}, - // VCPUs: to.Ptr[int32](4), - // }, - // { - // Name: to.Ptr("Standard_DS13_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.15), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // 
RetailPrice: to.Ptr[float64](0.42), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.74), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](1.06), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](114688), - // MemoryGB: to.Ptr[float64](56), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance")}, - // VCPUs: to.Ptr[int32](8), - // }, - // { - // Name: to.Ptr("Standard_DS14_v2"), - // EstimatedVMPrices: &armmachinelearning.EstimatedVMPrices{ - // BillingCurrency: to.Ptr(armmachinelearning.BillingCurrencyUSD), - // UnitOfMeasure: to.Ptr(armmachinelearning.UnitOfMeasureOneHour), - // Values: []*armmachinelearning.EstimatedVMPrice{ - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](0.3), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeLinux), - // RetailPrice: to.Ptr[float64](1.48), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](0.84), - // VMTier: to.Ptr(armmachinelearning.VMTierLowPriority), - // }, - // { - // OSType: to.Ptr(armmachinelearning.VMPriceOSTypeWindows), - // RetailPrice: to.Ptr[float64](2.11), - // VMTier: to.Ptr(armmachinelearning.VMTierStandard), - // }}, - // }, - // Family: to.Ptr("standardDSv2Family"), - // Gpus: to.Ptr[int32](0), - // LowPriorityCapable: to.Ptr(true), - // MaxResourceVolumeMB: to.Ptr[int32](229376), - // MemoryGB: to.Ptr[float64](112), - // OSVhdSizeMB: to.Ptr[int32](1047552), - // PremiumIO: to.Ptr(true), - // SupportedComputeTypes: []*string{ - // to.Ptr("AmlCompute"), - // to.Ptr("ComputeInstance")}, - // VCPUs: to.Ptr[int32](16), - // }}, - // } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client.go index ead48db60a4d..40314ceba32f 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client.go @@ -44,18 +44,17 @@ func NewWorkspaceConnectionsClient(subscriptionID string, credential azcore.Toke return client, nil } -// Create - +// Create - Create or update machine learning workspaces connections under the specified workspace. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - connectionName - Friendly name of the workspace connection -// - parameters - The object for creating or updating a new workspace connection // - options - WorkspaceConnectionsClientCreateOptions contains the optional parameters for the WorkspaceConnectionsClient.Create // method. 
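With this change the connection payload moves out of the required parameters argument and into the options bag, and the example test for this client is removed later in the diff. The following is a minimal sketch of the new call shape; it assumes the options struct carries the payload as a Body field of type *WorkspaceConnectionPropertiesV2BasicResource (inferred from the request marshalling and response handling in this hunk), and the v4 module path, subscription ID, and resource names are placeholders:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	client, err := armmachinelearning.NewWorkspaceConnectionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The connection payload is now optional and travels in the options bag;
	// the Body field name is an assumption based on the marshalling above.
	resp, err := client.Create(context.Background(), "<resource-group>", "<workspace>", "<connection>",
		&armmachinelearning.WorkspaceConnectionsClientCreateOptions{
			Body: &armmachinelearning.WorkspaceConnectionPropertiesV2BasicResource{
				Properties: &armmachinelearning.NoneAuthTypeWorkspaceConnectionProperties{
					AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypeNone),
					Category: to.Ptr(armmachinelearning.ConnectionCategoryContainerRegistry),
					Target:   to.Ptr("https://example.azurecr.io"),
				},
			},
		})
	if err != nil {
		log.Fatalf("failed to create the connection: %v", err)
	}
	_ = resp.WorkspaceConnectionPropertiesV2BasicResource
}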
-func (client *WorkspaceConnectionsClient) Create(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, parameters WorkspaceConnectionPropertiesV2BasicResource, options *WorkspaceConnectionsClientCreateOptions) (WorkspaceConnectionsClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, resourceGroupName, workspaceName, connectionName, parameters, options) +func (client *WorkspaceConnectionsClient) Create(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, options *WorkspaceConnectionsClientCreateOptions) (WorkspaceConnectionsClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, resourceGroupName, workspaceName, connectionName, options) if err != nil { return WorkspaceConnectionsClientCreateResponse{}, err } @@ -70,7 +69,7 @@ func (client *WorkspaceConnectionsClient) Create(ctx context.Context, resourceGr } // createCreateRequest creates the Create request. -func (client *WorkspaceConnectionsClient) createCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, parameters WorkspaceConnectionPropertiesV2BasicResource, options *WorkspaceConnectionsClientCreateOptions) (*policy.Request, error) { +func (client *WorkspaceConnectionsClient) createCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, options *WorkspaceConnectionsClientCreateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") @@ -93,10 +92,13 @@ func (client *WorkspaceConnectionsClient) createCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} - return req, runtime.MarshalAsJSON(req, parameters) + if options != nil && options.Body != nil { + return req, runtime.MarshalAsJSON(req, *options.Body) + } + return req, nil } // createHandleResponse handles the Create response. @@ -108,10 +110,10 @@ func (client *WorkspaceConnectionsClient) createHandleResponse(resp *http.Respon return result, nil } -// Delete - +// Delete - Delete machine learning workspaces connections by name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - connectionName - Friendly name of the workspace connection @@ -156,16 +158,16 @@ func (client *WorkspaceConnectionsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil } -// Get - +// Get - Lists machine learning workspaces connections by name. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - connectionName - Friendly name of the workspace connection @@ -210,7 +212,7 @@ func (client *WorkspaceConnectionsClient) getCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,6 +227,9 @@ func (client *WorkspaceConnectionsClient) getHandleResponse(resp *http.Response) return result, nil } +// NewListPager - Lists all the available machine learning workspaces connections under the specified workspace. +// +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspaceConnectionsClientListOptions contains the optional parameters for the WorkspaceConnectionsClient.NewListPager @@ -283,7 +288,7 @@ func (client *WorkspaceConnectionsClient) listCreateRequest(ctx context.Context, if options != nil && options.Category != nil { reqQP.Set("category", *options.Category) } - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -297,3 +302,132 @@ func (client *WorkspaceConnectionsClient) listHandleResponse(resp *http.Response } return result, nil } + +// ListSecrets - List all the secrets of a machine learning workspaces connections. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - connectionName - Friendly name of the workspace connection +// - options - WorkspaceConnectionsClientListSecretsOptions contains the optional parameters for the WorkspaceConnectionsClient.ListSecrets +// method. +func (client *WorkspaceConnectionsClient) ListSecrets(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, options *WorkspaceConnectionsClientListSecretsOptions) (WorkspaceConnectionsClientListSecretsResponse, error) { + req, err := client.listSecretsCreateRequest(ctx, resourceGroupName, workspaceName, connectionName, options) + if err != nil { + return WorkspaceConnectionsClientListSecretsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return WorkspaceConnectionsClientListSecretsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return WorkspaceConnectionsClientListSecretsResponse{}, runtime.NewResponseError(resp) + } + return client.listSecretsHandleResponse(resp) +} + +// listSecretsCreateRequest creates the ListSecrets request. 
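The ListSecrets operation introduced here has no counterpart in the removed example tests, so the following fragment sketches a call under stated assumptions; imports and client construction match the Create sketch above, and all resource names are placeholders.

// listConnectionSecrets sketches the new ListSecrets operation, which POSTs to
// .../connections/{connectionName}/listsecrets under the preview API version.
func listConnectionSecrets(ctx context.Context, client *armmachinelearning.WorkspaceConnectionsClient) error {
	resp, err := client.ListSecrets(ctx, "<resource-group>", "<workspace>", "<connection>", nil)
	if err != nil {
		return err
	}
	// The response is the connection resource, unmarshalled with its secret values populated.
	_ = resp.WorkspaceConnectionPropertiesV2BasicResource
	return nil
}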
+func (client *WorkspaceConnectionsClient) listSecretsCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, options *WorkspaceConnectionsClientListSecretsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/listsecrets" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if connectionName == "" { + return nil, errors.New("parameter connectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{connectionName}", url.PathEscape(connectionName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretsHandleResponse handles the ListSecrets response. +func (client *WorkspaceConnectionsClient) listSecretsHandleResponse(resp *http.Response) (WorkspaceConnectionsClientListSecretsResponse, error) { + result := WorkspaceConnectionsClientListSecretsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceConnectionPropertiesV2BasicResource); err != nil { + return WorkspaceConnectionsClientListSecretsResponse{}, err + } + return result, nil +} + +// Update - Update machine learning workspaces connections under the specified workspace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-06-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - workspaceName - Name of Azure Machine Learning workspace. +// - connectionName - Friendly name of the workspace connection +// - options - WorkspaceConnectionsClientUpdateOptions contains the optional parameters for the WorkspaceConnectionsClient.Update +// method. +func (client *WorkspaceConnectionsClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, options *WorkspaceConnectionsClientUpdateOptions) (WorkspaceConnectionsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, connectionName, options) + if err != nil { + return WorkspaceConnectionsClientUpdateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return WorkspaceConnectionsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return WorkspaceConnectionsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. 
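The new Update operation issues a PATCH and, like Create, takes its payload from the options bag. A sketch follows, reusing the client from the Create example; it passes nil options (an empty patch), since the concrete type of the options' Body field is not shown in this hunk.

// updateConnection sketches the new PATCH-based Update. A partial payload can be
// supplied through the options' Body field; its exact type is not shown here.
func updateConnection(ctx context.Context, client *armmachinelearning.WorkspaceConnectionsClient) error {
	resp, err := client.Update(ctx, "<resource-group>", "<workspace>", "<connection>", nil)
	if err != nil {
		return err
	}
	_ = resp.WorkspaceConnectionPropertiesV2BasicResource
	return nil
}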
+func (client *WorkspaceConnectionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, connectionName string, options *WorkspaceConnectionsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if workspaceName == "" { + return nil, errors.New("parameter workspaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceName}", url.PathEscape(workspaceName)) + if connectionName == "" { + return nil, errors.New("parameter connectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{connectionName}", url.PathEscape(connectionName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-06-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Body != nil { + return req, runtime.MarshalAsJSON(req, *options.Body) + } + return req, nil +} + +// updateHandleResponse handles the Update response. +func (client *WorkspaceConnectionsClient) updateHandleResponse(resp *http.Response) (WorkspaceConnectionsClientUpdateResponse, error) { + result := WorkspaceConnectionsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceConnectionPropertiesV2BasicResource); err != nil { + return WorkspaceConnectionsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client_example_test.go deleted file mode 100644 index 6bb9fbadb3ec..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaceconnections_client_example_test.go +++ /dev/null @@ -1,152 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/WorkspaceConnection/create.json -func ExampleWorkspaceConnectionsClient_Create() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspaceConnectionsClient().Create(ctx, "resourceGroup-1", "workspace-1", "connection-1", armmachinelearning.WorkspaceConnectionPropertiesV2BasicResource{ - Properties: &armmachinelearning.NoneAuthTypeWorkspaceConnectionProperties{ - AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypeNone), - Category: to.Ptr(armmachinelearning.ConnectionCategoryContainerRegistry), - Target: to.Ptr("www.facebook.com"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.WorkspaceConnectionPropertiesV2BasicResource = armmachinelearning.WorkspaceConnectionPropertiesV2BasicResource{ - // Name: to.Ptr("connection-1"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/connections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup-1/providers/Microsoft.MachineLearningServices/workspaces/workspace-1/connections/connection-1"), - // Properties: &armmachinelearning.NoneAuthTypeWorkspaceConnectionProperties{ - // AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypeNone), - // Category: to.Ptr(armmachinelearning.ConnectionCategoryContainerRegistry), - // Target: to.Ptr("www.facebook.com"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/WorkspaceConnection/get.json -func ExampleWorkspaceConnectionsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspaceConnectionsClient().Get(ctx, "resourceGroup-1", "workspace-1", "connection-1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. - // res.WorkspaceConnectionPropertiesV2BasicResource = armmachinelearning.WorkspaceConnectionPropertiesV2BasicResource{ - // Name: to.Ptr("connection-1"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/connections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup-1/providers/Microsoft.MachineLearningServices/workspaces/workspace-1/connections/connection-1"), - // Properties: &armmachinelearning.NoneAuthTypeWorkspaceConnectionProperties{ - // AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypeNone), - // Category: to.Ptr(armmachinelearning.ConnectionCategoryContainerRegistry), - // Target: to.Ptr("www.facebook.com"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/WorkspaceConnection/delete.json -func ExampleWorkspaceConnectionsClient_Delete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewWorkspaceConnectionsClient().Delete(ctx, "resourceGroup-1", "workspace-1", "connection-1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/WorkspaceConnection/list.json -func ExampleWorkspaceConnectionsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewWorkspaceConnectionsClient().NewListPager("resourceGroup-1", "workspace-1", &armmachinelearning.WorkspaceConnectionsClientListOptions{Target: to.Ptr("www.facebook.com"), - Category: to.Ptr("ContainerRegistry"), - }) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult = armmachinelearning.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult{ - // Value: []*armmachinelearning.WorkspaceConnectionPropertiesV2BasicResource{ - // { - // Name: to.Ptr("connection-1"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/connections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup-1/providers/Microsoft.MachineLearningServices/workspaces/workspace-1/linkedWorkspaces/connection-1"), - // Properties: &armmachinelearning.PATAuthTypeWorkspaceConnectionProperties{ - // AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypePAT), - // Category: to.Ptr(armmachinelearning.ConnectionCategoryContainerRegistry), - // Target: to.Ptr("www.facebook.com"), - // }, - // }, - // { - // Name: to.Ptr("connection-2"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/connections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup-1/providers/Microsoft.MachineLearningServices/workspaces/workspace-1/linkedWorkspaces/connection-2"), - // Properties: &armmachinelearning.PATAuthTypeWorkspaceConnectionProperties{ - // AuthType: to.Ptr(armmachinelearning.ConnectionAuthTypePAT), - // Category: to.Ptr(armmachinelearning.ConnectionCategoryContainerRegistry), - // Target: to.Ptr("www.facebook.com"), - // }, - // }}, - // } - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client.go index 92b9911a4aa7..a8ae1556b3e3 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client.go @@ -46,7 +46,7 @@ func NewWorkspaceFeaturesClient(subscriptionID string, credential azcore.TokenCr // NewListPager - Lists all enabled features for a workspace // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspaceFeaturesClientListOptions contains the optional parameters for the WorkspaceFeaturesClient.NewListPager @@ -99,7 +99,7 @@ func (client *WorkspaceFeaturesClient) listCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client_example_test.go deleted file mode 100644 index 813ee3f07a2c..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/workspacefeatures_client_example_test.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/WorkspaceFeature/list.json -func ExampleWorkspaceFeaturesClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewWorkspaceFeaturesClient().NewListPager("myResourceGroup", "testworkspace", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.ListAmlUserFeatureResult = armmachinelearning.ListAmlUserFeatureResult{ - // Value: []*armmachinelearning.AmlUserFeature{ - // { - // Description: to.Ptr("Create, edit or delete AutoML experiments in the SDK"), - // DisplayName: to.Ptr("Create edit experiments UI"), - // ID: to.Ptr("automatedml_createeditexperimentsui"), - // }, - // { - // Description: to.Ptr("Upgrade workspace from Basic to enterprise from the UI"), - // DisplayName: to.Ptr("Upgrade workspace UI"), - // ID: to.Ptr("workspace_upgradeworkspaceui"), - // }}, - // } - } -} diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client.go b/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client.go index bef17682289d..8c813619747d 100644 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client.go +++ b/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "net/http" "net/url" + "strconv" "strings" ) @@ -47,19 +48,21 @@ func NewWorkspacesClient(subscriptionID string, credential azcore.TokenCredentia // BeginCreateOrUpdate - Creates or updates a workspace with the specified parameters. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. -// - parameters - The parameters for creating or updating a machine learning workspace. +// - body - The parameters for creating or updating a machine learning workspace. // - options - WorkspacesClientBeginCreateOrUpdateOptions contains the optional parameters for the WorkspacesClient.BeginCreateOrUpdate // method. 
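BeginCreateOrUpdate keeps its shape but renames the payload argument to body, and the hunk that follows switches the poller to resolve its final state via the Location header. A minimal sketch under those assumptions; imports match the Create sketch above, and a real payload would also set Properties, SKU, identity, and so on.

// createWorkspace sketches the renamed body argument and LRO polling for
// WorkspacesClient.BeginCreateOrUpdate; names and values are placeholders.
func createWorkspace(ctx context.Context, client *armmachinelearning.WorkspacesClient) error {
	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<workspace>",
		armmachinelearning.Workspace{
			Location: to.Ptr("eastus"),
		}, nil)
	if err != nil {
		return err
	}
	// The poller now retrieves the final workspace via the Location header.
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return err
	}
	_ = resp.Workspace
	return nil
}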
-func (client *WorkspacesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters Workspace, options *WorkspacesClientBeginCreateOrUpdateOptions) (*runtime.Poller[WorkspacesClientCreateOrUpdateResponse], error) { +func (client *WorkspacesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, body Workspace, options *WorkspacesClientBeginCreateOrUpdateOptions) (*runtime.Poller[WorkspacesClientCreateOrUpdateResponse], error) { if options == nil || options.ResumeToken == "" { - resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, parameters, options) + resp, err := client.createOrUpdate(ctx, resourceGroupName, workspaceName, body, options) if err != nil { return nil, err } - return runtime.NewPoller[WorkspacesClientCreateOrUpdateResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WorkspacesClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[WorkspacesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) } @@ -68,9 +71,9 @@ func (client *WorkspacesClient) BeginCreateOrUpdate(ctx context.Context, resourc // CreateOrUpdate - Creates or updates a workspace with the specified parameters. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 -func (client *WorkspacesClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters Workspace, options *WorkspacesClientBeginCreateOrUpdateOptions) (*http.Response, error) { - req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, parameters, options) +// Generated from API version 2023-06-01-preview +func (client *WorkspacesClient) createOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, body Workspace, options *WorkspacesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, workspaceName, body, options) if err != nil { return nil, err } @@ -85,7 +88,7 @@ func (client *WorkspacesClient) createOrUpdate(ctx context.Context, resourceGrou } // createOrUpdateCreateRequest creates the CreateOrUpdate request. 
-func (client *WorkspacesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, parameters Workspace, options *WorkspacesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { +func (client *WorkspacesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, body Workspace, options *WorkspacesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") @@ -104,16 +107,16 @@ func (client *WorkspacesClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} - return req, runtime.MarshalAsJSON(req, parameters) + return req, runtime.MarshalAsJSON(req, body) } // BeginDelete - Deletes a machine learning workspace. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientBeginDeleteOptions contains the optional parameters for the WorkspacesClient.BeginDelete method. @@ -132,7 +135,7 @@ func (client *WorkspacesClient) BeginDelete(ctx context.Context, resourceGroupNa // Delete - Deletes a machine learning workspace. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *WorkspacesClient) deleteOperation(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesClientBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, workspaceName, options) if err != nil { @@ -168,7 +171,10 @@ func (client *WorkspacesClient) deleteCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.ForceToPurge != nil { + reqQP.Set("forceToPurge", strconv.FormatBool(*options.ForceToPurge)) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -177,7 +183,7 @@ func (client *WorkspacesClient) deleteCreateRequest(ctx context.Context, resourc // BeginDiagnose - Diagnose workspace setup issue. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientBeginDiagnoseOptions contains the optional parameters for the WorkspacesClient.BeginDiagnose @@ -199,7 +205,7 @@ func (client *WorkspacesClient) BeginDiagnose(ctx context.Context, resourceGroup // Diagnose - Diagnose workspace setup issue. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *WorkspacesClient) diagnose(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesClientBeginDiagnoseOptions) (*http.Response, error) { req, err := client.diagnoseCreateRequest(ctx, resourceGroupName, workspaceName, options) if err != nil { @@ -235,11 +241,11 @@ func (client *WorkspacesClient) diagnoseCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} - if options != nil && options.Parameters != nil { - return req, runtime.MarshalAsJSON(req, *options.Parameters) + if options != nil && options.Body != nil { + return req, runtime.MarshalAsJSON(req, *options.Body) } return req, nil } @@ -247,7 +253,7 @@ func (client *WorkspacesClient) diagnoseCreateRequest(ctx context.Context, resou // Get - Gets the properties of the specified machine learning workspace. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientGetOptions contains the optional parameters for the WorkspacesClient.Get method. @@ -286,7 +292,7 @@ func (client *WorkspacesClient) getCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -303,7 +309,7 @@ func (client *WorkspacesClient) getHandleResponse(resp *http.Response) (Workspac // NewListByResourceGroupPager - Lists all the available machine learning workspaces under the specified resource group. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - WorkspacesClientListByResourceGroupOptions contains the optional parameters for the WorkspacesClient.NewListByResourceGroupPager // method. @@ -351,10 +357,13 @@ func (client *WorkspacesClient) listByResourceGroupCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } + if options != nil && options.Kind != nil { + reqQP.Set("kind", *options.Kind) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -371,7 +380,7 @@ func (client *WorkspacesClient) listByResourceGroupHandleResponse(resp *http.Res // NewListBySubscriptionPager - Lists all the available machine learning workspaces under the specified subscription. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - options - WorkspacesClientListBySubscriptionOptions contains the optional parameters for the WorkspacesClient.NewListBySubscriptionPager // method. 
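Both workspace list pagers gain an optional kind filter forwarded as a query parameter. A sketch using the resource-group pager shown above (the subscription-level pager that follows accepts the same option); the filter value is a placeholder.

// listWorkspacesByKind sketches the new Kind option on NewListByResourceGroupPager,
// inferred from the kind query parameter added to the request builder above.
func listWorkspacesByKind(ctx context.Context, client *armmachinelearning.WorkspacesClient) error {
	pager := client.NewListByResourceGroupPager("<resource-group>",
		&armmachinelearning.WorkspacesClientListByResourceGroupOptions{
			Kind: to.Ptr("Default"),
		})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, ws := range page.Value {
			_ = ws // each entry is a *armmachinelearning.Workspace
		}
	}
	return nil
}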
func (client *WorkspacesClient) NewListBySubscriptionPager(options *WorkspacesClientListBySubscriptionOptions) *runtime.Pager[WorkspacesClientListBySubscriptionResponse] { @@ -414,10 +423,13 @@ func (client *WorkspacesClient) listBySubscriptionCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") if options != nil && options.Skip != nil { reqQP.Set("$skip", *options.Skip) } + if options != nil && options.Kind != nil { + reqQP.Set("kind", *options.Kind) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -433,10 +445,10 @@ func (client *WorkspacesClient) listBySubscriptionHandleResponse(resp *http.Resp } // ListKeys - Lists all the keys associated with this workspace. This includes keys for the storage account, app insights -// and password for container registry +// and password for container registry. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientListKeysOptions contains the optional parameters for the WorkspacesClient.ListKeys method. @@ -475,7 +487,7 @@ func (client *WorkspacesClient) listKeysCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -490,10 +502,10 @@ func (client *WorkspacesClient) listKeysHandleResponse(resp *http.Response) (Wor return result, nil } -// ListNotebookAccessToken - return notebook access token and refresh token +// ListNotebookAccessToken - Get Azure Machine Learning Workspace notebook access token // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientListNotebookAccessTokenOptions contains the optional parameters for the WorkspacesClient.ListNotebookAccessToken @@ -533,7 +545,7 @@ func (client *WorkspacesClient) listNotebookAccessTokenCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -548,10 +560,10 @@ func (client *WorkspacesClient) listNotebookAccessTokenHandleResponse(resp *http return result, nil } -// ListNotebookKeys - List keys of a notebook. +// ListNotebookKeys - Lists keys of Azure Machine Learning Workspaces notebook. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. 
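Both list hunks above add a kind query parameter. A sketch of filtering the subscription-level listing by workspace kind; the "Default" value, subscription ID, and module major-version suffix are illustrative assumptions, and NewListByResourceGroupPager accepts the same Kind option.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" // major-version suffix for 4.0.0-beta.1 is an assumption
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// Kind is serialized as the kind query parameter by listBySubscriptionCreateRequest.
	pager := clientFactory.NewWorkspacesClient().NewListBySubscriptionPager(
		&armmachinelearning.WorkspacesClientListBySubscriptionOptions{Kind: to.Ptr("Default")})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, v := range page.Value {
			_ = v // inspect each workspace here
		}
	}
}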
// - options - WorkspacesClientListNotebookKeysOptions contains the optional parameters for the WorkspacesClient.ListNotebookKeys @@ -591,7 +603,7 @@ func (client *WorkspacesClient) listNotebookKeysCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -610,7 +622,7 @@ func (client *WorkspacesClient) listNotebookKeysHandleResponse(resp *http.Respon // (FQDNs) programmatically. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientListOutboundNetworkDependenciesEndpointsOptions contains the optional parameters for the WorkspacesClient.ListOutboundNetworkDependenciesEndpoints @@ -650,7 +662,7 @@ func (client *WorkspacesClient) listOutboundNetworkDependenciesEndpointsCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -665,10 +677,10 @@ func (client *WorkspacesClient) listOutboundNetworkDependenciesEndpointsHandleRe return result, nil } -// ListStorageAccountKeys - List storage account keys of a workspace. +// ListStorageAccountKeys - Lists keys of Azure Machine Learning Workspace's storage account. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientListStorageAccountKeysOptions contains the optional parameters for the WorkspacesClient.ListStorageAccountKeys @@ -708,7 +720,7 @@ func (client *WorkspacesClient) listStorageAccountKeysCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -723,10 +735,10 @@ func (client *WorkspacesClient) listStorageAccountKeysHandleResponse(resp *http. return result, nil } -// BeginPrepareNotebook - Prepare a notebook. +// BeginPrepareNotebook - Prepare Azure Machine Learning Workspace's notebook resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientBeginPrepareNotebookOptions contains the optional parameters for the WorkspacesClient.BeginPrepareNotebook @@ -745,10 +757,10 @@ func (client *WorkspacesClient) BeginPrepareNotebook(ctx context.Context, resour } } -// PrepareNotebook - Prepare a notebook. 
+// PrepareNotebook - Prepare Azure Machine Learning Workspace's notebook resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *WorkspacesClient) prepareNotebook(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesClientBeginPrepareNotebookOptions) (*http.Response, error) { req, err := client.prepareNotebookCreateRequest(ctx, resourceGroupName, workspaceName, options) if err != nil { @@ -784,17 +796,17 @@ func (client *WorkspacesClient) prepareNotebookCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil } -// BeginResyncKeys - Resync all the keys associated with this workspace. This includes keys for the storage account, app insights +// BeginResyncKeys - Resync all the keys associated with this workspace.This includes keys for the storage account, app insights // and password for container registry // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. // - options - WorkspacesClientBeginResyncKeysOptions contains the optional parameters for the WorkspacesClient.BeginResyncKeys @@ -805,17 +817,19 @@ func (client *WorkspacesClient) BeginResyncKeys(ctx context.Context, resourceGro if err != nil { return nil, err } - return runtime.NewPoller[WorkspacesClientResyncKeysResponse](resp, client.internal.Pipeline(), nil) + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WorkspacesClientResyncKeysResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) } else { return runtime.NewPollerFromResumeToken[WorkspacesClientResyncKeysResponse](options.ResumeToken, client.internal.Pipeline(), nil) } } -// ResyncKeys - Resync all the keys associated with this workspace. This includes keys for the storage account, app insights +// ResyncKeys - Resync all the keys associated with this workspace.This includes keys for the storage account, app insights // and password for container registry // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview func (client *WorkspacesClient) resyncKeys(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesClientBeginResyncKeysOptions) (*http.Response, error) { req, err := client.resyncKeysCreateRequest(ctx, resourceGroupName, workspaceName, options) if err != nil { @@ -851,7 +865,7 @@ func (client *WorkspacesClient) resyncKeysCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -860,14 +874,14 @@ func (client *WorkspacesClient) resyncKeysCreateRequest(ctx context.Context, res // BeginUpdate - Updates a machine learning workspace with the specified parameters. 
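The resync-keys poller above is now built with FinalStateViaLocation; from the caller's side the usual begin/poll pattern is unchanged. A minimal sketch with placeholder names (subscription ID, resource group, workspace, module major-version suffix are assumptions).

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" // major-version suffix for 4.0.0-beta.1 is an assumption
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The poller finishes via the Location header internally; callers simply poll to completion as before.
	poller, err := clientFactory.NewWorkspacesClient().BeginResyncKeys(ctx, "testrg123", "workspaces123", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	if _, err = poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatalf("failed to pull the result: %v", err)
	}
}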
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 +// Generated from API version 2023-06-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - workspaceName - Name of Azure Machine Learning workspace. -// - parameters - The parameters for updating a machine learning workspace. +// - body - The parameters for updating a machine learning workspace. // - options - WorkspacesClientBeginUpdateOptions contains the optional parameters for the WorkspacesClient.BeginUpdate method. -func (client *WorkspacesClient) BeginUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters WorkspaceUpdateParameters, options *WorkspacesClientBeginUpdateOptions) (*runtime.Poller[WorkspacesClientUpdateResponse], error) { +func (client *WorkspacesClient) BeginUpdate(ctx context.Context, resourceGroupName string, workspaceName string, body WorkspaceUpdateParameters, options *WorkspacesClientBeginUpdateOptions) (*runtime.Poller[WorkspacesClientUpdateResponse], error) { if options == nil || options.ResumeToken == "" { - resp, err := client.update(ctx, resourceGroupName, workspaceName, parameters, options) + resp, err := client.update(ctx, resourceGroupName, workspaceName, body, options) if err != nil { return nil, err } @@ -880,9 +894,9 @@ func (client *WorkspacesClient) BeginUpdate(ctx context.Context, resourceGroupNa // Update - Updates a machine learning workspace with the specified parameters. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-10-01 -func (client *WorkspacesClient) update(ctx context.Context, resourceGroupName string, workspaceName string, parameters WorkspaceUpdateParameters, options *WorkspacesClientBeginUpdateOptions) (*http.Response, error) { - req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, parameters, options) +// Generated from API version 2023-06-01-preview +func (client *WorkspacesClient) update(ctx context.Context, resourceGroupName string, workspaceName string, body WorkspaceUpdateParameters, options *WorkspacesClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, workspaceName, body, options) if err != nil { return nil, err } @@ -897,7 +911,7 @@ func (client *WorkspacesClient) update(ctx context.Context, resourceGroupName st } // updateCreateRequest creates the Update request. 
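The update hunks above rename the parameters argument to body; Go callers pass the argument positionally, so existing call sites keep working. A sketch under the same placeholder assumptions as the snippets above (subscription ID, resource names, module major-version suffix).

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" // major-version suffix for 4.0.0-beta.1 is an assumption
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armmachinelearning.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The fourth argument is now named body in the generated signature.
	poller, err := clientFactory.NewWorkspacesClient().BeginUpdate(ctx, "workspace-1234", "testworkspace",
		armmachinelearning.WorkspaceUpdateParameters{
			Properties: &armmachinelearning.WorkspacePropertiesUpdateParameters{
				Description:  to.Ptr("new description"),
				FriendlyName: to.Ptr("New friendly name"),
			},
		}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("failed to pull the result: %v", err)
	}
	_ = res // the updated armmachinelearning.Workspace
}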
-func (client *WorkspacesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, parameters WorkspaceUpdateParameters, options *WorkspacesClientBeginUpdateOptions) (*policy.Request, error) { +func (client *WorkspacesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, body WorkspaceUpdateParameters, options *WorkspacesClientBeginUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") @@ -916,8 +930,8 @@ func (client *WorkspacesClient) updateCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-10-01") + reqQP.Set("api-version", "2023-06-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} - return req, runtime.MarshalAsJSON(req, parameters) + return req, runtime.MarshalAsJSON(req, body) } diff --git a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client_example_test.go b/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client_example_test.go deleted file mode 100644 index 59167ae0b3e4..000000000000 --- a/sdk/resourcemanager/machinelearning/armmachinelearning/workspaces_client_example_test.go +++ /dev/null @@ -1,683 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armmachinelearning_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/machinelearning/armmachinelearning/v3" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/get.json -func ExampleWorkspacesClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspacesClient().Get(ctx, "workspace-1234", "testworkspace", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.Workspace = armmachinelearning.Workspace{ - // Name: to.Ptr("testworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace"), - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssignedUserAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testuai": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("test description"), - // AllowPublicAccessWhenBehindVnet: to.Ptr(false), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistry"), - // DiscoveryURL: to.Ptr("http://example.com"), - // Encryption: &armmachinelearning.EncryptionProperty{ - // Identity: &armmachinelearning.IdentityForCmk{ - // UserAssignedIdentity: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testuai"), - // }, - // KeyVaultProperties: &armmachinelearning.EncryptionKeyVaultProperties{ - // IdentityClientID: to.Ptr(""), - // KeyIdentifier: to.Ptr("https://testkv.vault.azure.net/keys/testkey/aabbccddee112233445566778899aabb"), - // KeyVaultArmID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // }, - // Status: to.Ptr(armmachinelearning.EncryptionStatusEnabled), - // }, - // FriendlyName: to.Ptr("HelloName"), - // HbiWorkspace: to.Ptr(false), - // ImageBuildCompute: to.Ptr("testcompute"), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // PrivateEndpointConnections: []*armmachinelearning.PrivateEndpointConnection{ - // { - // Name: to.Ptr("testprivatelinkconnection"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces/privateEndpointConnections"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/privateEndpointConnections/testprivatelinkconnection"), - // Properties: &armmachinelearning.PrivateEndpointConnectionProperties{ - // PrivateEndpoint: &armmachinelearning.PrivateEndpoint{ - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/rg-1234/providers/Microsoft.Network/privateEndpoints/petest01"), - // }, - // PrivateLinkServiceConnectionState: &armmachinelearning.PrivateLinkServiceConnectionState{ - // Description: to.Ptr("Auto-Approved"), - // 
ActionsRequired: to.Ptr("None"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // ProvisioningState: to.Ptr(armmachinelearning.PrivateEndpointConnectionProvisioningStateSucceeded), - // }, - // }}, - // PrivateLinkCount: to.Ptr[int32](0), - // PublicNetworkAccess: to.Ptr(armmachinelearning.PublicNetworkAccessDisabled), - // ServiceProvisionedResourceGroup: to.Ptr("testworkspace_0000111122223333"), - // SharedPrivateLinkResources: []*armmachinelearning.SharedPrivateLinkResource{ - // { - // Name: to.Ptr("testcosmosdbresource"), - // Properties: &armmachinelearning.SharedPrivateLinkResourceProperty{ - // GroupID: to.Ptr("Sql"), - // PrivateLinkResourceID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.DocumentDB/databaseAccounts/testcosmosdbresource/privateLinkResources/Sql"), - // RequestMessage: to.Ptr("Please approve"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // }}, - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccount"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/create.json -func ExampleWorkspacesClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewWorkspacesClient().BeginCreateOrUpdate(ctx, "workspace-1234", "testworkspace", armmachinelearning.Workspace{ - Identity: &armmachinelearning.ManagedServiceIdentity{ - Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssignedUserAssigned), - UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testuai": {}, - }, - }, - Location: to.Ptr("eastus2euap"), - Properties: &armmachinelearning.WorkspaceProperties{ - Description: to.Ptr("test description"), - ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistry"), - Encryption: &armmachinelearning.EncryptionProperty{ - Identity: &armmachinelearning.IdentityForCmk{ - UserAssignedIdentity: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testuai"), - }, - KeyVaultProperties: &armmachinelearning.EncryptionKeyVaultProperties{ - IdentityClientID: to.Ptr(""), - KeyIdentifier: to.Ptr("https://testkv.vault.azure.net/keys/testkey/aabbccddee112233445566778899aabb"), - KeyVaultArmID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - 
}, - Status: to.Ptr(armmachinelearning.EncryptionStatusEnabled), - }, - FriendlyName: to.Ptr("HelloName"), - HbiWorkspace: to.Ptr(false), - KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - SharedPrivateLinkResources: []*armmachinelearning.SharedPrivateLinkResource{ - { - Name: to.Ptr("testdbresource"), - Properties: &armmachinelearning.SharedPrivateLinkResourceProperty{ - GroupID: to.Ptr("Sql"), - PrivateLinkResourceID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.DocumentDB/databaseAccounts/testdbresource/privateLinkResources/Sql"), - RequestMessage: to.Ptr("Please approve"), - Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - }, - }}, - StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccount"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Workspace = armmachinelearning.Workspace{ - // Name: to.Ptr("testworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace"), - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssignedUserAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // UserAssignedIdentities: map[string]*armmachinelearning.UserAssignedIdentity{ - // "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testuai": &armmachinelearning.UserAssignedIdentity{ - // ClientID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // }, - // }, - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("test description"), - // AllowPublicAccessWhenBehindVnet: to.Ptr(false), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistry"), - // DiscoveryURL: to.Ptr("http://example.com"), - // Encryption: &armmachinelearning.EncryptionProperty{ - // Identity: &armmachinelearning.IdentityForCmk{ - // UserAssignedIdentity: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testuai"), - // }, - // KeyVaultProperties: &armmachinelearning.EncryptionKeyVaultProperties{ - // 
IdentityClientID: to.Ptr(""), - // KeyIdentifier: to.Ptr("https://testkv.vault.azure.net/keys/testkey/aabbccddee112233445566778899aabb"), - // KeyVaultArmID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // }, - // Status: to.Ptr(armmachinelearning.EncryptionStatusEnabled), - // }, - // FriendlyName: to.Ptr("HelloName"), - // HbiWorkspace: to.Ptr(false), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // PublicNetworkAccess: to.Ptr(armmachinelearning.PublicNetworkAccessDisabled), - // SharedPrivateLinkResources: []*armmachinelearning.SharedPrivateLinkResource{ - // { - // Name: to.Ptr("testdbresource"), - // Properties: &armmachinelearning.SharedPrivateLinkResourceProperty{ - // GroupID: to.Ptr("Sql"), - // PrivateLinkResourceID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.DocumentDB/databaseAccounts/testdbresource/privateLinkResources/Sql"), - // RequestMessage: to.Ptr("Please approve"), - // Status: to.Ptr(armmachinelearning.PrivateEndpointServiceConnectionStatusApproved), - // }, - // }}, - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccount"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/delete.json -func ExampleWorkspacesClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewWorkspacesClient().BeginDelete(ctx, "workspace-1234", "testworkspace", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/update.json -func ExampleWorkspacesClient_BeginUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewWorkspacesClient().BeginUpdate(ctx, "workspace-1234", "testworkspace", armmachinelearning.WorkspaceUpdateParameters{ - Properties: &armmachinelearning.WorkspacePropertiesUpdateParameters{ - Description: to.Ptr("new description"), - FriendlyName: to.Ptr("New friendly name"), - PublicNetworkAccess: to.Ptr(armmachinelearning.PublicNetworkAccessDisabled), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if 
err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.Workspace = armmachinelearning.Workspace{ - // Name: to.Ptr("testworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace"), - // Identity: &armmachinelearning.ManagedServiceIdentity{ - // Type: to.Ptr(armmachinelearning.ManagedServiceIdentityTypeSystemAssigned), - // PrincipalID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // TenantID: to.Ptr("00000000-1111-2222-3333-444444444444"), - // }, - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("new description"), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistry"), - // DiscoveryURL: to.Ptr("http://example.com"), - // FriendlyName: to.Ptr("New friendly name"), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // PublicNetworkAccess: to.Ptr(armmachinelearning.PublicNetworkAccessDisabled), - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccount"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/listByResourceGroup.json -func ExampleWorkspacesClient_NewListByResourceGroupPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewWorkspacesClient().NewListByResourceGroupPager("workspace-1234", &armmachinelearning.WorkspacesClientListByResourceGroupOptions{Skip: nil}) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.WorkspaceListResult = armmachinelearning.WorkspaceListResult{ - // Value: []*armmachinelearning.Workspace{ - // { - // Name: to.Ptr("testworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace"), - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("test description"), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistry"), - // DiscoveryURL: to.Ptr("http://example.com"), - // FriendlyName: to.Ptr("HelloName"), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccount"), - // }, - // }, - // { - // Name: to.Ptr("testworkspace1"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace1"), - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("test description"), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistryNew"), - // DiscoveryURL: to.Ptr("http://example.com"), - // FriendlyName: to.Ptr("HelloName 1"), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkvNew"), - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccountOld"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/diagnose.json -func ExampleWorkspacesClient_BeginDiagnose() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewWorkspacesClient().BeginDiagnose(ctx, "workspace-1234", "testworkspace", &armmachinelearning.WorkspacesClientBeginDiagnoseOptions{Parameters: &armmachinelearning.DiagnoseWorkspaceParameters{ - Value: &armmachinelearning.DiagnoseRequestProperties{ - ApplicationInsights: map[string]any{}, - ContainerRegistry: map[string]any{}, 
- DNSResolution: map[string]any{}, - KeyVault: map[string]any{}, - Nsg: map[string]any{}, - Others: map[string]any{}, - ResourceLock: map[string]any{}, - StorageAccount: map[string]any{}, - Udr: map[string]any{}, - }, - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.DiagnoseResponseResult = armmachinelearning.DiagnoseResponseResult{ - // Value: &armmachinelearning.DiagnoseResponseResultValue{ - // ApplicationInsightsResults: []*armmachinelearning.DiagnoseResult{ - // }, - // ContainerRegistryResults: []*armmachinelearning.DiagnoseResult{ - // }, - // DNSResolutionResults: []*armmachinelearning.DiagnoseResult{ - // { - // Code: to.Ptr("CustomDNSInUse"), - // Level: to.Ptr(armmachinelearning.DiagnoseResultLevelWarning), - // Message: to.Ptr("We have detected an on-premise dns server is configured. Please make sure conditional forwarding is configured correctly according to doc https://foo"), - // }}, - // KeyVaultResults: []*armmachinelearning.DiagnoseResult{ - // }, - // NetworkSecurityRuleResults: []*armmachinelearning.DiagnoseResult{ - // }, - // OtherResults: []*armmachinelearning.DiagnoseResult{ - // }, - // ResourceLockResults: []*armmachinelearning.DiagnoseResult{ - // }, - // StorageAccountResults: []*armmachinelearning.DiagnoseResult{ - // }, - // UserDefinedRouteResults: []*armmachinelearning.DiagnoseResult{ - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/listKeys.json -func ExampleWorkspacesClient_ListKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspacesClient().ListKeys(ctx, "testrg123", "workspaces123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ListWorkspaceKeysResult = armmachinelearning.ListWorkspaceKeysResult{ - // ContainerRegistryCredentials: &armmachinelearning.RegistryListCredentialsResult{ - // Passwords: []*armmachinelearning.Password{ - // { - // Name: to.Ptr("password"), - // Value: to.Ptr(""), - // }, - // { - // Name: to.Ptr("password2"), - // Value: to.Ptr("0KARRQoQHSUq1yViPWg7YFernOS=Ic/t"), - // }}, - // Username: to.Ptr("testdemoworkjmjmeykp"), - // }, - // NotebookAccessKeys: &armmachinelearning.ListNotebookKeysResult{ - // }, - // UserStorageResourceID: to.Ptr("/subscriptions/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/resourceGroups/ragargeastus2euap/providers/Microsoft.Storage/storageAccounts/testdemoworkazashomr"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/resyncKeys.json -func ExampleWorkspacesClient_BeginResyncKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewWorkspacesClient().BeginResyncKeys(ctx, "testrg123", "workspaces123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/listBySubscription.json -func ExampleWorkspacesClient_NewListBySubscriptionPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewWorkspacesClient().NewListBySubscriptionPager(&armmachinelearning.WorkspacesClientListBySubscriptionOptions{Skip: nil}) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.WorkspaceListResult = armmachinelearning.WorkspaceListResult{ - // Value: []*armmachinelearning.Workspace{ - // { - // Name: to.Ptr("testworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace"), - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("test description"), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistry"), - // DiscoveryURL: to.Ptr("http://example.com"), - // FriendlyName: to.Ptr("HelloName"), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkv"), - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccount"), - // }, - // }, - // { - // Name: to.Ptr("testworkspace"), - // Type: to.Ptr("Microsoft.MachineLearningServices/workspaces"), - // ID: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-5678/providers/Microsoft.MachineLearningServices/workspaces/testworkspace"), - // Location: to.Ptr("eastus2euap"), - // Properties: &armmachinelearning.WorkspaceProperties{ - // Description: to.Ptr("test description"), - // ApplicationInsights: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/microsoft.insights/components/testinsights"), - // ContainerRegistry: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.ContainerRegistry/registries/testRegistryNew"), - // DiscoveryURL: to.Ptr("http://example.com"), - // FriendlyName: to.Ptr("HelloName"), - // KeyVault: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/workspace-1234/providers/Microsoft.KeyVault/vaults/testkvNew"), - // StorageAccount: to.Ptr("/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/accountcrud-1234/providers/Microsoft.Storage/storageAccounts/testStorageAccountOld"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/listNotebookAccessToken.json -func ExampleWorkspacesClient_ListNotebookAccessToken() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspacesClient().ListNotebookAccessToken(ctx, "workspace-1234", "testworkspace", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. 
- _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.NotebookAccessTokenResult = armmachinelearning.NotebookAccessTokenResult{ - // ExpiresIn: to.Ptr[int32](28800), - // HostName: to.Ptr("Host product name"), - // NotebookResourceID: to.Ptr("94350843095843059"), - // PublicDNS: to.Ptr("resource.notebooks.azure.net"), - // Scope: to.Ptr("aznb_identity"), - // TokenType: to.Ptr("Bearer"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Notebook/prepare.json -func ExampleWorkspacesClient_BeginPrepareNotebook() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewWorkspacesClient().BeginPrepareNotebook(ctx, "testrg123", "workspaces123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.NotebookResourceInfo = armmachinelearning.NotebookResourceInfo{ - // Fqdn: to.Ptr("testnotebook.notebooks.azure.com"), - // NotebookPreparationError: &armmachinelearning.NotebookPreparationError{ - // ErrorMessage: to.Ptr("general error"), - // StatusCode: to.Ptr[int32](500), - // }, - // ResourceID: to.Ptr("aabbccddee112233445566778899"), - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Workspace/listStorageAccountKeys.json -func ExampleWorkspacesClient_ListStorageAccountKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspacesClient().ListStorageAccountKeys(ctx, "testrg123", "workspaces123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ListStorageAccountKeysResult = armmachinelearning.ListStorageAccountKeysResult{ - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/Notebook/listKeys.json -func ExampleWorkspacesClient_ListNotebookKeys() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspacesClient().ListNotebookKeys(ctx, "testrg123", "workspaces123", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ListNotebookKeysResult = armmachinelearning.ListNotebookKeysResult{ - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/aafb0944f7ab936e8cfbad8969bd5eb32263fb4f/specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2022-10-01/examples/ExternalFQDN/get.json -func ExampleWorkspacesClient_ListOutboundNetworkDependenciesEndpoints() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armmachinelearning.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewWorkspacesClient().ListOutboundNetworkDependenciesEndpoints(ctx, "workspace-1234", "testworkspace", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ExternalFQDNResponse = armmachinelearning.ExternalFQDNResponse{ - // Value: []*armmachinelearning.FQDNEndpoints{ - // { - // Properties: &armmachinelearning.FQDNEndpointsProperties{ - // Category: to.Ptr("Azure Active Directory"), - // Endpoints: []*armmachinelearning.FQDNEndpoint{ - // { - // DomainName: to.Ptr("login.microsoftonline.com"), - // EndpointDetails: []*armmachinelearning.FQDNEndpointDetail{ - // { - // Port: to.Ptr[int32](443), - // }}, - // }}, - // }, - // }, - // { - // Properties: &armmachinelearning.FQDNEndpointsProperties{ - // Category: to.Ptr("Azure portal"), - // Endpoints: []*armmachinelearning.FQDNEndpoint{ - // { - // DomainName: to.Ptr("management.azure.com"), - // EndpointDetails: []*armmachinelearning.FQDNEndpointDetail{ - // { - // Port: to.Ptr[int32](443), - // }}, - // }}, - // }, - // }}, - // } -}