Commit a814923
feat: add dedicated_resources to DeployedIndex message in aiplatform v1 index_endpoint.proto chore: sort imports (#990)

* feat: add dedicated_resources to DeployedIndex message in aiplatform v1 index_endpoint.proto chore: sort imports

PiperOrigin-RevId: 425394497

Source-Link: googleapis/googleapis@bd97e46

Source-Link: googleapis/googleapis-gen@13eed11
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTNlZWQxMTA1MWUxY2Y5ZjlhYjQzZDE3NGYyM2QzNWZmYjMyOTQxYyJ9

* 🦉 Updates from OwlBot

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
gcf-owl-bot[bot] authored Jan 31, 2022
1 parent 0ca3747 commit a814923
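
The headline change adds a `dedicated_resources` field to the `DeployedIndex` message, so a deployed index can be pinned to caller-chosen machines instead of automatic resources. As a hedged sketch (not taken from this diff), the new field might be populated through the `aiplatform_v1` types like this; the project, index name, replica counts, and machine type below are placeholders:

from google.cloud import aiplatform_v1

# Hedged sketch: a DeployedIndex that requests dedicated machines.
# The id, index resource name, and machine type are placeholders.
deployed_index = aiplatform_v1.DeployedIndex(
    id="my_deployed_index",
    index="projects/my-project/locations/us-central1/indexes/my-index",
    dedicated_resources=aiplatform_v1.DedicatedResources(
        machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-16"),
        min_replica_count=2,
        max_replica_count=4,
    ),
)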
Showing 224 changed files with 11,938 additions and 5,626 deletions.
@@ -99,21 +99,21 @@ class AutoMlTablesInputs(proto.Message):
             operating characteristic (ROC) curve.
             "minimize-log-loss" - Minimize log loss.
             "maximize-au-prc" - Maximize the area under
-            the precision-recall curve. "maximize-
-            precision-at-recall" - Maximize precision for a
-            specified
+            the precision-recall curve.
+            "maximize-precision-at-recall" - Maximize
+            precision for a specified
             recall value. "maximize-recall-at-precision" -
             Maximize recall for a specified
             precision value.
             classification (multi-class):
             "minimize-log-loss" (default) - Minimize log
             loss.
             regression:
-            "minimize-rmse" (default) - Minimize root-
-            mean-squared error (RMSE). "minimize-mae" -
-            Minimize mean-absolute error (MAE). "minimize-
-            rmsle" - Minimize root-mean-squared log error
-            (RMSLE).
+            "minimize-rmse" (default) - Minimize
+            root-mean-squared error (RMSE). "minimize-mae"
+            - Minimize mean-absolute error (MAE).
+            "minimize-rmsle" - Minimize root-mean-squared
+            log error (RMSLE).
         train_budget_milli_node_hours (int):
             Required. The train budget of creating this
             model, expressed in milli node hours i.e. 1,000
@@ -99,21 +99,21 @@ class AutoMlTablesInputs(proto.Message):
             operating characteristic (ROC) curve.
             "minimize-log-loss" - Minimize log loss.
             "maximize-au-prc" - Maximize the area under
-            the precision-recall curve. "maximize-
-            precision-at-recall" - Maximize precision for a
-            specified
+            the precision-recall curve.
+            "maximize-precision-at-recall" - Maximize
+            precision for a specified
             recall value. "maximize-recall-at-precision" -
             Maximize recall for a specified
             precision value.
             classification (multi-class):
             "minimize-log-loss" (default) - Minimize log
             loss.
             regression:
-            "minimize-rmse" (default) - Minimize root-
-            mean-squared error (RMSE). "minimize-mae" -
-            Minimize mean-absolute error (MAE). "minimize-
-            rmsle" - Minimize root-mean-squared log error
-            (RMSLE).
+            "minimize-rmse" (default) - Minimize
+            root-mean-squared error (RMSE). "minimize-mae"
+            - Minimize mean-absolute error (MAE).
+            "minimize-rmsle" - Minimize root-mean-squared
+            log error (RMSLE).
         train_budget_milli_node_hours (int):
             Required. The train budget of creating this
             model, expressed in milli node hours i.e. 1,000
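
For context, the rewrapped docstring above enumerates the accepted `optimization_objective` values for AutoML Tables. A minimal sketch of passing one of them follows; the import path mirrors this repository's schema packages but is an assumption, and the target column and budget are illustrative:

# Hedged sketch: the import path and field values are assumptions for
# illustration, not taken from this diff.
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

inputs = definition_v1.AutoMlTablesInputs(
    prediction_type="classification",
    target_column="label",                      # hypothetical column name
    optimization_objective="maximize-au-prc",   # one of the values listed above
    train_budget_milli_node_hours=1000,         # 1,000 milli node hours = 1 node hour
)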
58 changes: 47 additions & 11 deletions google/cloud/aiplatform_v1/services/dataset_service/async_client.py
@@ -16,7 +16,7 @@
 from collections import OrderedDict
 import functools
 import re
-from typing import Dict, Sequence, Tuple, Type, Union
+from typing import Dict, Optional, Sequence, Tuple, Type, Union
 import pkg_resources
 
 from google.api_core.client_options import ClientOptions
@@ -129,6 +129,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
 
     from_service_account_json = from_service_account_file
 
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+        More details can be found at https://google.aip.dev/auth/4114.
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return DatasetServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
     @property
     def transport(self) -> DatasetServiceTransport:
         """Returns the transport used by the client instance.
@@ -234,7 +270,7 @@ async def create_dataset(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([parent, dataset])
         if request is not None and has_flattened_params:
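
This one-word comment rename ("Sanity check" becomes "Quick check") repeats in every flattened method below; the guard itself enforces that callers pass either a full request object or individual flattened arguments, never both. A standalone sketch of that pattern, with an illustrative request shape and the error text paraphrased from the generated code:

# Standalone sketch of the request-vs-flattened-arguments guard shared by
# the methods in this diff. The dict request and return value are
# illustrative stand-ins, not the generated code itself.
def list_datasets(request=None, *, parent=None):
    # Quick check: if we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Build the request from `parent`, or use the one given; the RPC
    # dispatch is elided in this sketch.
    request = request or {"parent": parent}
    return request

# Either style works, but not both at once:
print(list_datasets(parent="projects/p/locations/l"))
print(list_datasets(request={"parent": "projects/p/locations/l"}))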
@@ -315,7 +351,7 @@ async def get_dataset(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([name])
         if request is not None and has_flattened_params:
@@ -400,7 +436,7 @@ async def update_dataset(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([dataset, update_mask])
         if request is not None and has_flattened_params:
@@ -478,7 +514,7 @@ async def list_datasets(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([parent])
         if request is not None and has_flattened_params:
@@ -569,7 +605,7 @@ async def delete_dataset(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([name])
         if request is not None and has_flattened_params:
@@ -661,7 +697,7 @@ async def import_data(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([name, import_configs])
         if request is not None and has_flattened_params:
@@ -754,7 +790,7 @@ async def export_data(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([name, export_config])
         if request is not None and has_flattened_params:
@@ -839,7 +875,7 @@ async def list_data_items(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([parent])
         if request is not None and has_flattened_params:
@@ -917,7 +953,7 @@ async def get_annotation_spec(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([name])
         if request is not None and has_flattened_params:
@@ -992,7 +1028,7 @@ async def list_annotations(
         """
         # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
+        # Quick check: If we got a request object, we should *not* have
         # gotten any keyword arguments that map to the request.
         has_flattened_params = any([parent])
         if request is not None and has_flattened_params:
