diff --git a/.github/release-please.yml b/.github/release-please.yml
index 29601ad4..fe749ff6 100644
--- a/.github/release-please.yml
+++ b/.github/release-please.yml
@@ -1,5 +1,6 @@
releaseType: python
handleGHRelease: true
+manifest: true
# NOTE: this section is generated by synthtool.languages.python
# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py
branches:
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 00000000..5ff7b32d
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "2.8.3"
+}
diff --git a/docs/videointelligence_v1/types.rst b/docs/videointelligence_v1/types.rst
index 6bada002..b5d39277 100644
--- a/docs/videointelligence_v1/types.rst
+++ b/docs/videointelligence_v1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Videointelligence v1 API
.. automodule:: google.cloud.videointelligence_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/videointelligence_v1beta2/types.rst b/docs/videointelligence_v1beta2/types.rst
index 62921eac..47f60c25 100644
--- a/docs/videointelligence_v1beta2/types.rst
+++ b/docs/videointelligence_v1beta2/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Videointelligence v1beta2 API
.. automodule:: google.cloud.videointelligence_v1beta2.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/videointelligence_v1p1beta1/types.rst b/docs/videointelligence_v1p1beta1/types.rst
index 2aa52bbf..e9c681c5 100644
--- a/docs/videointelligence_v1p1beta1/types.rst
+++ b/docs/videointelligence_v1p1beta1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Videointelligence v1p1beta1 API
.. automodule:: google.cloud.videointelligence_v1p1beta1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/videointelligence_v1p2beta1/types.rst b/docs/videointelligence_v1p2beta1/types.rst
index 1825803c..30c5462f 100644
--- a/docs/videointelligence_v1p2beta1/types.rst
+++ b/docs/videointelligence_v1p2beta1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Videointelligence v1p2beta1 API
.. automodule:: google.cloud.videointelligence_v1p2beta1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/videointelligence_v1p3beta1/types.rst b/docs/videointelligence_v1p3beta1/types.rst
index 75c97809..4305d588 100644
--- a/docs/videointelligence_v1p3beta1/types.rst
+++ b/docs/videointelligence_v1p3beta1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Videointelligence v1p3beta1 API
.. automodule:: google.cloud.videointelligence_v1p3beta1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/google/cloud/videointelligence/__init__.py b/google/cloud/videointelligence/__init__.py
index 27d43e57..7f23c17c 100644
--- a/google/cloud/videointelligence/__init__.py
+++ b/google/cloud/videointelligence/__init__.py
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.videointelligence import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from google.cloud.videointelligence_v1.services.video_intelligence_service.async_client import (
VideoIntelligenceServiceAsyncClient,
diff --git a/google/cloud/videointelligence/gapic_version.py b/google/cloud/videointelligence/gapic_version.py
new file mode 100644
index 00000000..bb984214
--- /dev/null
+++ b/google/cloud/videointelligence/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "2.8.3" # {x-release-please-version}
diff --git a/google/cloud/videointelligence_v1/__init__.py b/google/cloud/videointelligence_v1/__init__.py
index d99322a7..65030df2 100644
--- a/google/cloud/videointelligence_v1/__init__.py
+++ b/google/cloud/videointelligence_v1/__init__.py
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.videointelligence import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py
index 6d98f9af..f8f9b47c 100644
--- a/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py
+++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py
@@ -16,7 +16,17 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
@@ -164,9 +174,9 @@ def transport(self) -> VideoIntelligenceServiceTransport:
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -210,12 +220,12 @@ def __init__(
async def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -241,7 +251,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
@@ -255,7 +265,7 @@ async def sample_annotate_video():
print(response)
Args:
- request (Union[google.cloud.videointelligence_v1.types.AnnotateVideoRequest, dict]):
+ request (Optional[Union[google.cloud.videointelligence_v1.types.AnnotateVideoRequest, dict]]):
The request object. Video annotation request.
input_uri (:class:`str`):
Input video location. Currently, only `Cloud
@@ -276,7 +286,7 @@ async def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (:class:`Sequence[google.cloud.videointelligence_v1.types.Feature]`):
+ features (:class:`MutableSequence[google.cloud.videointelligence_v1.types.Feature]`):
Required. Requested video annotation
features.
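Note the docstring sample fix folded into this hunk: `features` is a repeated enum field, so the request is built with `features=['PERSON_DETECTION']` instead of a bare string. A runnable sketch of the same call against the new annotations; the bucket URI is a placeholder, not something from this diff:

```python
# Sketch: calling the async client with the corrected list-typed `features`.
import asyncio

from google.cloud import videointelligence_v1


async def main():
    client = videointelligence_v1.VideoIntelligenceServiceAsyncClient()
    operation = await client.annotate_video(
        request=videointelligence_v1.AnnotateVideoRequest(
            features=["PERSON_DETECTION"],
            input_uri="gs://your-bucket/your-video.mp4",  # placeholder URI
        )
    )
    response = await operation.result()  # wait for the long-running operation
    print(response)


asyncio.run(main())
```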
diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py
index cb613c44..4a1e76dd 100644
--- a/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py
+++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py
@@ -16,7 +16,18 @@
from collections import OrderedDict
import os
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
@@ -60,7 +71,7 @@ class VideoIntelligenceServiceClientMeta(type):
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
@@ -313,8 +324,8 @@ def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[Union[str, VideoIntelligenceServiceTransport]] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -328,7 +339,7 @@ def __init__(
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -358,6 +369,7 @@ def __init__(
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
@@ -410,12 +422,12 @@ def __init__(
def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -441,7 +453,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
@@ -476,7 +488,7 @@ def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (Sequence[google.cloud.videointelligence_v1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1.types.Feature]):
Required. Requested video annotation
features.
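The sync client gets the same `annotate_video` signature changes, plus one of its own: `client_options` widens to `Optional[Union[ClientOptions, dict]]`, with the `cast` above reassuring the type checker once `from_dict` has normalized the value. A minimal sketch of the dict form the new annotation admits; the endpoint shown is just the service default, used for illustration:

```python
# Sketch: client_options may now be annotated as a plain dict, which the
# constructor converts via client_options_lib.from_dict() before the cast.
from google.cloud import videointelligence_v1

client = videointelligence_v1.VideoIntelligenceServiceClient(
    client_options={"api_endpoint": "videointelligence.googleapis.com"}
)
```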
diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py
index 0cb83ae4..e937ad28 100644
--- a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py
+++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py
@@ -49,7 +49,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
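This one-line change is representative of most hunks in the PR: under PEP 484, a `None` default no longer implies `Optional`, so strict type checkers reject the old spelling. A minimal illustration, not code from this repository:

```python
# Both functions behave identically at runtime; only the annotations differ.
# mypy --strict rejects the first (implicit Optional) and accepts the second.
from typing import Optional


def old_style(credentials: str = None):  # type error: default None, type str
    return credentials


def new_style(credentials: Optional[str] = None):  # explicit Optional: OK
    return credentials
```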
diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py
index 40320e76..4dea1a22 100644
--- a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py
+++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py
@@ -47,14 +47,14 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[grpc.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -182,8 +182,8 @@ def __init__(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py
index 2b08769e..00c14f7b 100644
--- a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py
+++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py
@@ -49,7 +49,7 @@ class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTrans
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -92,15 +92,15 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[aio.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1/types/video_intelligence.py b/google/cloud/videointelligence_v1/types/video_intelligence.py
index e79468a2..c006a126 100644
--- a/google/cloud/videointelligence_v1/types/video_intelligence.py
+++ b/google/cloud/videointelligence_v1/types/video_intelligence.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from typing import MutableMapping, MutableSequence
+
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
@@ -124,7 +126,7 @@ class AnnotateVideoRequest(proto.Message):
The video data bytes. If unset, the input video(s) should be
specified via the ``input_uri``. If set, ``input_uri`` must
be unset.
- features (Sequence[google.cloud.videointelligence_v1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1.types.Feature]):
Required. Requested video annotation
features.
video_context (google.cloud.videointelligence_v1.types.VideoContext):
@@ -146,29 +148,29 @@ class AnnotateVideoRequest(proto.Message):
the region will be determined based on video file location.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- input_content = proto.Field(
+ input_content: bytes = proto.Field(
proto.BYTES,
number=6,
)
- features = proto.RepeatedField(
+ features: MutableSequence["Feature"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="Feature",
)
- video_context = proto.Field(
+ video_context: "VideoContext" = proto.Field(
proto.MESSAGE,
number=3,
message="VideoContext",
)
- output_uri = proto.Field(
+ output_uri: str = proto.Field(
proto.STRING,
number=4,
)
- location_id = proto.Field(
+ location_id: str = proto.Field(
proto.STRING,
number=5,
)
@@ -178,7 +180,7 @@ class VideoContext(proto.Message):
r"""Video context and/or feature-specific parameters.
Attributes:
- segments (Sequence[google.cloud.videointelligence_v1.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1.types.VideoSegment]):
Video segments to annotate. The segments may
overlap and are not required to be contiguous or
span the whole video. If unspecified, each video
@@ -201,47 +203,47 @@ class VideoContext(proto.Message):
Config for OBJECT_TRACKING.
"""
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- label_detection_config = proto.Field(
+ label_detection_config: "LabelDetectionConfig" = proto.Field(
proto.MESSAGE,
number=2,
message="LabelDetectionConfig",
)
- shot_change_detection_config = proto.Field(
+ shot_change_detection_config: "ShotChangeDetectionConfig" = proto.Field(
proto.MESSAGE,
number=3,
message="ShotChangeDetectionConfig",
)
- explicit_content_detection_config = proto.Field(
+ explicit_content_detection_config: "ExplicitContentDetectionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="ExplicitContentDetectionConfig",
)
- face_detection_config = proto.Field(
+ face_detection_config: "FaceDetectionConfig" = proto.Field(
proto.MESSAGE,
number=5,
message="FaceDetectionConfig",
)
- speech_transcription_config = proto.Field(
+ speech_transcription_config: "SpeechTranscriptionConfig" = proto.Field(
proto.MESSAGE,
number=6,
message="SpeechTranscriptionConfig",
)
- text_detection_config = proto.Field(
+ text_detection_config: "TextDetectionConfig" = proto.Field(
proto.MESSAGE,
number=8,
message="TextDetectionConfig",
)
- person_detection_config = proto.Field(
+ person_detection_config: "PersonDetectionConfig" = proto.Field(
proto.MESSAGE,
number=11,
message="PersonDetectionConfig",
)
- object_tracking_config = proto.Field(
+ object_tracking_config: "ObjectTrackingConfig" = proto.Field(
proto.MESSAGE,
number=13,
message="ObjectTrackingConfig",
@@ -283,24 +285,24 @@ class LabelDetectionConfig(proto.Message):
when we release a new model.
"""
- label_detection_mode = proto.Field(
+ label_detection_mode: "LabelDetectionMode" = proto.Field(
proto.ENUM,
number=1,
enum="LabelDetectionMode",
)
- stationary_camera = proto.Field(
+ stationary_camera: bool = proto.Field(
proto.BOOL,
number=2,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=3,
)
- frame_confidence_threshold = proto.Field(
+ frame_confidence_threshold: float = proto.Field(
proto.FLOAT,
number=4,
)
- video_confidence_threshold = proto.Field(
+ video_confidence_threshold: float = proto.Field(
proto.FLOAT,
number=5,
)
@@ -316,7 +318,7 @@ class ShotChangeDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -332,7 +334,7 @@ class ObjectTrackingConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -355,15 +357,15 @@ class FaceDetectionConfig(proto.Message):
'include_bounding_boxes' is set to false.
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
- include_bounding_boxes = proto.Field(
+ include_bounding_boxes: bool = proto.Field(
proto.BOOL,
number=2,
)
- include_attributes = proto.Field(
+ include_attributes: bool = proto.Field(
proto.BOOL,
number=5,
)
@@ -386,15 +388,15 @@ class PersonDetectionConfig(proto.Message):
'include_bounding_boxes' is set to false.
"""
- include_bounding_boxes = proto.Field(
+ include_bounding_boxes: bool = proto.Field(
proto.BOOL,
number=1,
)
- include_pose_landmarks = proto.Field(
+ include_pose_landmarks: bool = proto.Field(
proto.BOOL,
number=2,
)
- include_attributes = proto.Field(
+ include_attributes: bool = proto.Field(
proto.BOOL,
number=3,
)
@@ -410,7 +412,7 @@ class ExplicitContentDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -420,7 +422,7 @@ class TextDetectionConfig(proto.Message):
r"""Config for TEXT_DETECTION.
Attributes:
- language_hints (Sequence[str]):
+ language_hints (MutableSequence[str]):
Language hint can be specified if the
language to be detected is known a priori. It
can increase the accuracy of the detection.
@@ -435,11 +437,11 @@ class TextDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- language_hints = proto.RepeatedField(
+ language_hints: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=2,
)
@@ -459,12 +461,12 @@ class VideoSegment(proto.Message):
(inclusive).
"""
- start_time_offset = proto.Field(
+ start_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time_offset = proto.Field(
+ end_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -481,12 +483,12 @@ class LabelSegment(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -504,12 +506,12 @@ class LabelFrame(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -529,15 +531,15 @@ class Entity(proto.Message):
Language code for ``description`` in BCP-47 format.
"""
- entity_id = proto.Field(
+ entity_id: str = proto.Field(
proto.STRING,
number=1,
)
- description = proto.Field(
+ description: str = proto.Field(
proto.STRING,
number=2,
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=3,
)
@@ -549,41 +551,41 @@ class LabelAnnotation(proto.Message):
Attributes:
entity (google.cloud.videointelligence_v1.types.Entity):
Detected entity.
- category_entities (Sequence[google.cloud.videointelligence_v1.types.Entity]):
+ category_entities (MutableSequence[google.cloud.videointelligence_v1.types.Entity]):
Common categories for the detected entity. For example, when
the label is ``Terrier``, the category is likely ``dog``.
And in some cases there might be more than one category,
e.g., ``Terrier`` could also be a ``pet``.
- segments (Sequence[google.cloud.videointelligence_v1.types.LabelSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1.types.LabelSegment]):
All video segments where a label was
detected.
- frames (Sequence[google.cloud.videointelligence_v1.types.LabelFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1.types.LabelFrame]):
All video frames where a label was detected.
version (str):
Feature version.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- category_entities = proto.RepeatedField(
+ category_entities: MutableSequence["Entity"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Entity",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["LabelSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["LabelFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelFrame",
)
- version = proto.Field(
+ version: str = proto.Field(
proto.STRING,
number=5,
)
@@ -601,12 +603,12 @@ class ExplicitContentFrame(proto.Message):
Likelihood of the pornography content.
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- pornography_likelihood = proto.Field(
+ pornography_likelihood: "Likelihood" = proto.Field(
proto.ENUM,
number=2,
enum="Likelihood",
@@ -619,19 +621,19 @@ class ExplicitContentAnnotation(proto.Message):
frame, no annotations are present for that frame.
Attributes:
- frames (Sequence[google.cloud.videointelligence_v1.types.ExplicitContentFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1.types.ExplicitContentFrame]):
All video frames where explicit content was
detected.
version (str):
Feature version.
"""
- frames = proto.RepeatedField(
+ frames: MutableSequence["ExplicitContentFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="ExplicitContentFrame",
)
- version = proto.Field(
+ version: str = proto.Field(
proto.STRING,
number=2,
)
@@ -652,19 +654,19 @@ class NormalizedBoundingBox(proto.Message):
Bottom Y coordinate.
"""
- left = proto.Field(
+ left: float = proto.Field(
proto.FLOAT,
number=1,
)
- top = proto.Field(
+ top: float = proto.Field(
proto.FLOAT,
number=2,
)
- right = proto.Field(
+ right: float = proto.Field(
proto.FLOAT,
number=3,
)
- bottom = proto.Field(
+ bottom: float = proto.Field(
proto.FLOAT,
number=4,
)
@@ -674,7 +676,7 @@ class FaceDetectionAnnotation(proto.Message):
r"""Face detection annotation.
Attributes:
- tracks (Sequence[google.cloud.videointelligence_v1.types.Track]):
+ tracks (MutableSequence[google.cloud.videointelligence_v1.types.Track]):
The face tracks with attributes.
thumbnail (bytes):
The thumbnail of a person's face.
@@ -682,16 +684,16 @@ class FaceDetectionAnnotation(proto.Message):
Feature version.
"""
- tracks = proto.RepeatedField(
+ tracks: MutableSequence["Track"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="Track",
)
- thumbnail = proto.Field(
+ thumbnail: bytes = proto.Field(
proto.BYTES,
number=4,
)
- version = proto.Field(
+ version: str = proto.Field(
proto.STRING,
number=5,
)
@@ -701,18 +703,18 @@ class PersonDetectionAnnotation(proto.Message):
r"""Person detection annotation per video.
Attributes:
- tracks (Sequence[google.cloud.videointelligence_v1.types.Track]):
+ tracks (MutableSequence[google.cloud.videointelligence_v1.types.Track]):
The detected tracks of a person.
version (str):
Feature version.
"""
- tracks = proto.RepeatedField(
+ tracks: MutableSequence["Track"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Track",
)
- version = proto.Field(
+ version: str = proto.Field(
proto.STRING,
number=2,
)
@@ -726,7 +728,7 @@ class FaceSegment(proto.Message):
Video segment where a face was detected.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
@@ -737,7 +739,7 @@ class FaceFrame(proto.Message):
r"""Deprecated. No effect.
Attributes:
- normalized_bounding_boxes (Sequence[google.cloud.videointelligence_v1.types.NormalizedBoundingBox]):
+ normalized_bounding_boxes (MutableSequence[google.cloud.videointelligence_v1.types.NormalizedBoundingBox]):
Normalized Bounding boxes in a frame.
There can be more than one box if the same
face is detected in multiple locations within
@@ -748,12 +750,14 @@ class FaceFrame(proto.Message):
location.
"""
- normalized_bounding_boxes = proto.RepeatedField(
+ normalized_bounding_boxes: MutableSequence[
+ "NormalizedBoundingBox"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -767,22 +771,22 @@ class FaceAnnotation(proto.Message):
thumbnail (bytes):
Thumbnail of a representative face view (in
JPEG format).
- segments (Sequence[google.cloud.videointelligence_v1.types.FaceSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1.types.FaceSegment]):
All video segments where a face was detected.
- frames (Sequence[google.cloud.videointelligence_v1.types.FaceFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1.types.FaceFrame]):
All video frames where a face was detected.
"""
- thumbnail = proto.Field(
+ thumbnail: bytes = proto.Field(
proto.BYTES,
number=1,
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["FaceSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="FaceSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["FaceFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="FaceFrame",
@@ -801,29 +805,29 @@ class TimestampedObject(proto.Message):
Time-offset, relative to the beginning of the
video, corresponding to the video frame for this
object.
- attributes (Sequence[google.cloud.videointelligence_v1.types.DetectedAttribute]):
+ attributes (MutableSequence[google.cloud.videointelligence_v1.types.DetectedAttribute]):
Optional. The attributes of the object in the
bounding box.
- landmarks (Sequence[google.cloud.videointelligence_v1.types.DetectedLandmark]):
+ landmarks (MutableSequence[google.cloud.videointelligence_v1.types.DetectedLandmark]):
Optional. The detected landmarks.
"""
- normalized_bounding_box = proto.Field(
+ normalized_bounding_box: "NormalizedBoundingBox" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
)
- attributes = proto.RepeatedField(
+ attributes: MutableSequence["DetectedAttribute"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="DetectedAttribute",
)
- landmarks = proto.RepeatedField(
+ landmarks: MutableSequence["DetectedLandmark"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="DetectedLandmark",
@@ -836,32 +840,32 @@ class Track(proto.Message):
Attributes:
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Video segment of a track.
- timestamped_objects (Sequence[google.cloud.videointelligence_v1.types.TimestampedObject]):
+ timestamped_objects (MutableSequence[google.cloud.videointelligence_v1.types.TimestampedObject]):
The object with timestamp and attributes per
frame in the track.
- attributes (Sequence[google.cloud.videointelligence_v1.types.DetectedAttribute]):
+ attributes (MutableSequence[google.cloud.videointelligence_v1.types.DetectedAttribute]):
Optional. Attributes in the track level.
confidence (float):
Optional. The confidence score of the tracked
object.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- timestamped_objects = proto.RepeatedField(
+ timestamped_objects: MutableSequence["TimestampedObject"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="TimestampedObject",
)
- attributes = proto.RepeatedField(
+ attributes: MutableSequence["DetectedAttribute"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="DetectedAttribute",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
@@ -884,15 +888,15 @@ class DetectedAttribute(proto.Message):
"black", "blonde", etc.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- value = proto.Field(
+ value: str = proto.Field(
proto.STRING,
number=3,
)
@@ -915,16 +919,16 @@ class DetectedLandmark(proto.Message):
The confidence score of the detected landmark. Range [0, 1].
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- point = proto.Field(
+ point: "NormalizedVertex" = proto.Field(
proto.MESSAGE,
number=2,
message="NormalizedVertex",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=3,
)
@@ -939,11 +943,11 @@ class VideoAnnotationResults(proto.Message):
Storage <https://cloud.google.com/storage/>`__.
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Video segment on which the annotation is run.
- segment_label_annotations (Sequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
+ segment_label_annotations (MutableSequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
Topical label annotations on video level or
user-specified segment level. There is exactly
one element for each unique label.
- segment_presence_label_annotations (Sequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
+ segment_presence_label_annotations (MutableSequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
Presence label annotations on video level or user-specified
segment level. There is exactly one element for each unique
label. Compared to the existing topical
@@ -952,11 +956,11 @@ class VideoAnnotationResults(proto.Message):
and is made available only when the client sets
``LabelDetectionConfig.model`` to "builtin/latest" in the
request.
- shot_label_annotations (Sequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
+ shot_label_annotations (MutableSequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
Topical label annotations on shot level.
There is exactly one element for each unique
label.
- shot_presence_label_annotations (Sequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
+ shot_presence_label_annotations (MutableSequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
Presence label annotations on shot level. There is exactly
one element for each unique label. Compared to the existing
topical ``shot_label_annotations``, this field presents more
@@ -964,34 +968,34 @@ class VideoAnnotationResults(proto.Message):
and is made available only when the client sets
``LabelDetectionConfig.model`` to "builtin/latest" in the
request.
- frame_label_annotations (Sequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
+ frame_label_annotations (MutableSequence[google.cloud.videointelligence_v1.types.LabelAnnotation]):
Label annotations on frame level.
There is exactly one element for each unique
label.
- face_annotations (Sequence[google.cloud.videointelligence_v1.types.FaceAnnotation]):
+ face_annotations (MutableSequence[google.cloud.videointelligence_v1.types.FaceAnnotation]):
Deprecated. Please use ``face_detection_annotations``
instead.
- face_detection_annotations (Sequence[google.cloud.videointelligence_v1.types.FaceDetectionAnnotation]):
+ face_detection_annotations (MutableSequence[google.cloud.videointelligence_v1.types.FaceDetectionAnnotation]):
Face detection annotations.
- shot_annotations (Sequence[google.cloud.videointelligence_v1.types.VideoSegment]):
+ shot_annotations (MutableSequence[google.cloud.videointelligence_v1.types.VideoSegment]):
Shot annotations. Each shot is represented as
a video segment.
explicit_annotation (google.cloud.videointelligence_v1.types.ExplicitContentAnnotation):
Explicit content annotation.
- speech_transcriptions (Sequence[google.cloud.videointelligence_v1.types.SpeechTranscription]):
+ speech_transcriptions (MutableSequence[google.cloud.videointelligence_v1.types.SpeechTranscription]):
Speech transcription.
- text_annotations (Sequence[google.cloud.videointelligence_v1.types.TextAnnotation]):
+ text_annotations (MutableSequence[google.cloud.videointelligence_v1.types.TextAnnotation]):
OCR text detection and tracking.
Annotations for list of detected text snippets.
Each will have list of frame information
associated with it.
- object_annotations (Sequence[google.cloud.videointelligence_v1.types.ObjectTrackingAnnotation]):
+ object_annotations (MutableSequence[google.cloud.videointelligence_v1.types.ObjectTrackingAnnotation]):
Annotations for list of objects detected and
tracked in video.
- logo_recognition_annotations (Sequence[google.cloud.videointelligence_v1.types.LogoRecognitionAnnotation]):
+ logo_recognition_annotations (MutableSequence[google.cloud.videointelligence_v1.types.LogoRecognitionAnnotation]):
Annotations for list of logos detected,
tracked and recognized in video.
- person_detection_annotations (Sequence[google.cloud.videointelligence_v1.types.PersonDetectionAnnotation]):
+ person_detection_annotations (MutableSequence[google.cloud.videointelligence_v1.types.PersonDetectionAnnotation]):
Person detection annotations.
error (google.rpc.status_pb2.Status):
If set, indicates an error. Note that for a single
@@ -999,86 +1003,98 @@ class VideoAnnotationResults(proto.Message):
may fail.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=10,
message="VideoSegment",
)
- segment_label_annotations = proto.RepeatedField(
+ segment_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="LabelAnnotation",
)
- segment_presence_label_annotations = proto.RepeatedField(
+ segment_presence_label_annotations: MutableSequence[
+ "LabelAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=23,
message="LabelAnnotation",
)
- shot_label_annotations = proto.RepeatedField(
+ shot_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelAnnotation",
)
- shot_presence_label_annotations = proto.RepeatedField(
+ shot_presence_label_annotations: MutableSequence[
+ "LabelAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=24,
message="LabelAnnotation",
)
- frame_label_annotations = proto.RepeatedField(
+ frame_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelAnnotation",
)
- face_annotations = proto.RepeatedField(
+ face_annotations: MutableSequence["FaceAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=5,
message="FaceAnnotation",
)
- face_detection_annotations = proto.RepeatedField(
+ face_detection_annotations: MutableSequence[
+ "FaceDetectionAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=13,
message="FaceDetectionAnnotation",
)
- shot_annotations = proto.RepeatedField(
+ shot_annotations: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VideoSegment",
)
- explicit_annotation = proto.Field(
+ explicit_annotation: "ExplicitContentAnnotation" = proto.Field(
proto.MESSAGE,
number=7,
message="ExplicitContentAnnotation",
)
- speech_transcriptions = proto.RepeatedField(
+ speech_transcriptions: MutableSequence["SpeechTranscription"] = proto.RepeatedField(
proto.MESSAGE,
number=11,
message="SpeechTranscription",
)
- text_annotations = proto.RepeatedField(
+ text_annotations: MutableSequence["TextAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=12,
message="TextAnnotation",
)
- object_annotations = proto.RepeatedField(
+ object_annotations: MutableSequence[
+ "ObjectTrackingAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=14,
message="ObjectTrackingAnnotation",
)
- logo_recognition_annotations = proto.RepeatedField(
+ logo_recognition_annotations: MutableSequence[
+ "LogoRecognitionAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=19,
message="LogoRecognitionAnnotation",
)
- person_detection_annotations = proto.RepeatedField(
+ person_detection_annotations: MutableSequence[
+ "PersonDetectionAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=20,
message="PersonDetectionAnnotation",
)
- error = proto.Field(
+ error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=9,
message=status_pb2.Status,
@@ -1091,12 +1107,12 @@ class AnnotateVideoResponse(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_results (Sequence[google.cloud.videointelligence_v1.types.VideoAnnotationResults]):
+ annotation_results (MutableSequence[google.cloud.videointelligence_v1.types.VideoAnnotationResults]):
Annotation results for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_results = proto.RepeatedField(
+ annotation_results: MutableSequence["VideoAnnotationResults"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationResults",
@@ -1125,30 +1141,30 @@ class VideoAnnotationProgress(proto.Message):
the request contains more than one segment.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- progress_percent = proto.Field(
+ progress_percent: int = proto.Field(
proto.INT32,
number=2,
)
- start_time = proto.Field(
+ start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- update_time = proto.Field(
+ update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
- feature = proto.Field(
+ feature: "Feature" = proto.Field(
proto.ENUM,
number=5,
enum="Feature",
)
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=6,
message="VideoSegment",
@@ -1161,12 +1177,14 @@ class AnnotateVideoProgress(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_progress (Sequence[google.cloud.videointelligence_v1.types.VideoAnnotationProgress]):
+ annotation_progress (MutableSequence[google.cloud.videointelligence_v1.types.VideoAnnotationProgress]):
Progress metadata for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_progress = proto.RepeatedField(
+ annotation_progress: MutableSequence[
+ "VideoAnnotationProgress"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationProgress",
@@ -1197,7 +1215,7 @@ class SpeechTranscriptionConfig(proto.Message):
character in each filtered word with asterisks, e.g. "f***".
If set to ``false`` or omitted, profanities won't be
filtered out.
- speech_contexts (Sequence[google.cloud.videointelligence_v1.types.SpeechContext]):
+ speech_contexts (MutableSequence[google.cloud.videointelligence_v1.types.SpeechContext]):
Optional. A means to provide context to
assist the speech recognition.
enable_automatic_punctuation (bool):
@@ -1211,7 +1229,7 @@ class SpeechTranscriptionConfig(proto.Message):
complimentary to all users. In the future this
may be exclusively available as a premium
feature.".
- audio_tracks (Sequence[int]):
+ audio_tracks (MutableSequence[int]):
Optional. For file formats, such as MXF or
MKV, supporting multiple audio tracks, specify
up to two tracks. Default: track 0.
@@ -1235,40 +1253,40 @@ class SpeechTranscriptionConfig(proto.Message):
is ``false``.
"""
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=1,
)
- max_alternatives = proto.Field(
+ max_alternatives: int = proto.Field(
proto.INT32,
number=2,
)
- filter_profanity = proto.Field(
+ filter_profanity: bool = proto.Field(
proto.BOOL,
number=3,
)
- speech_contexts = proto.RepeatedField(
+ speech_contexts: MutableSequence["SpeechContext"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="SpeechContext",
)
- enable_automatic_punctuation = proto.Field(
+ enable_automatic_punctuation: bool = proto.Field(
proto.BOOL,
number=5,
)
- audio_tracks = proto.RepeatedField(
+ audio_tracks: MutableSequence[int] = proto.RepeatedField(
proto.INT32,
number=6,
)
- enable_speaker_diarization = proto.Field(
+ enable_speaker_diarization: bool = proto.Field(
proto.BOOL,
number=7,
)
- diarization_speaker_count = proto.Field(
+ diarization_speaker_count: int = proto.Field(
proto.INT32,
number=8,
)
- enable_word_confidence = proto.Field(
+ enable_word_confidence: bool = proto.Field(
proto.BOOL,
number=9,
)
@@ -1279,7 +1297,7 @@ class SpeechContext(proto.Message):
words and phrases in the results.
Attributes:
- phrases (Sequence[str]):
+ phrases (MutableSequence[str]):
Optional. A list of strings containing words and phrases
"hints" so that the speech recognition is more likely to
recognize them. This can be used to improve the accuracy for
@@ -1290,7 +1308,7 @@ class SpeechContext(proto.Message):
limits <https://cloud.google.com/speech/limits#content>`__.
"""
- phrases = proto.RepeatedField(
+ phrases: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
@@ -1301,7 +1319,7 @@ class SpeechTranscription(proto.Message):
audio.
Attributes:
- alternatives (Sequence[google.cloud.videointelligence_v1.types.SpeechRecognitionAlternative]):
+ alternatives (MutableSequence[google.cloud.videointelligence_v1.types.SpeechRecognitionAlternative]):
May contain one or more recognition hypotheses (up to the
maximum specified in ``max_alternatives``). These
alternatives are ordered in terms of accuracy, with the top
@@ -1315,12 +1333,12 @@ class SpeechTranscription(proto.Message):
spoken in the audio.
"""
- alternatives = proto.RepeatedField(
+ alternatives: MutableSequence["SpeechRecognitionAlternative"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="SpeechRecognitionAlternative",
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=2,
)
@@ -1341,22 +1359,22 @@ class SpeechRecognitionAlternative(proto.Message):
accurate and users should not rely on it to be always
provided. The default of 0.0 is a sentinel value indicating
``confidence`` was not set.
- words (Sequence[google.cloud.videointelligence_v1.types.WordInfo]):
+ words (MutableSequence[google.cloud.videointelligence_v1.types.WordInfo]):
Output only. A list of word-specific information for each
recognized word. Note: When ``enable_speaker_diarization``
is set to true, you will see all the words from the
beginning of the audio.
"""
- transcript = proto.Field(
+ transcript: str = proto.Field(
proto.STRING,
number=1,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- words = proto.RepeatedField(
+ words: MutableSequence["WordInfo"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="WordInfo",
@@ -1400,25 +1418,25 @@ class WordInfo(proto.Message):
set if speaker diarization is enabled.
"""
- start_time = proto.Field(
+ start_time: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time = proto.Field(
+ end_time: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
)
- word = proto.Field(
+ word: str = proto.Field(
proto.STRING,
number=3,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
- speaker_tag = proto.Field(
+ speaker_tag: int = proto.Field(
proto.INT32,
number=5,
)
@@ -1436,11 +1454,11 @@ class NormalizedVertex(proto.Message):
Y coordinate.
"""
- x = proto.Field(
+ x: float = proto.Field(
proto.FLOAT,
number=1,
)
- y = proto.Field(
+ y: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -1461,11 +1479,11 @@ class NormalizedBoundingPoly(proto.Message):
calculations for location of the box.
Attributes:
- vertices (Sequence[google.cloud.videointelligence_v1.types.NormalizedVertex]):
+ vertices (MutableSequence[google.cloud.videointelligence_v1.types.NormalizedVertex]):
Normalized vertices of the bounding polygon.
"""
- vertices = proto.RepeatedField(
+ vertices: MutableSequence["NormalizedVertex"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="NormalizedVertex",
@@ -1483,21 +1501,21 @@ class TextSegment(proto.Message):
Confidence for the track of detected text. It
is calculated as the highest over all frames
where OCR detected text appears.
- frames (Sequence[google.cloud.videointelligence_v1.types.TextFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1.types.TextFrame]):
Information related to the frames where OCR
detected text appears.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["TextFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="TextFrame",
@@ -1517,12 +1535,12 @@ class TextFrame(proto.Message):
Timestamp of this frame.
"""
- rotated_bounding_box = proto.Field(
+ rotated_bounding_box: "NormalizedBoundingPoly" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingPoly",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -1537,23 +1555,23 @@ class TextAnnotation(proto.Message):
Attributes:
text (str):
The detected text.
- segments (Sequence[google.cloud.videointelligence_v1.types.TextSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1.types.TextSegment]):
All video segments where OCR detected text
appears.
version (str):
Feature version.
"""
- text = proto.Field(
+ text: str = proto.Field(
proto.STRING,
number=1,
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["TextSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="TextSegment",
)
- version = proto.Field(
+ version: str = proto.Field(
proto.STRING,
number=3,
)
@@ -1572,12 +1590,12 @@ class ObjectTrackingFrame(proto.Message):
The timestamp of the frame in microseconds.
"""
- normalized_bounding_box = proto.Field(
+ normalized_bounding_box: "NormalizedBoundingBox" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -1616,7 +1634,7 @@ class ObjectTrackingAnnotation(proto.Message):
confidence (float):
Object category's labeling confidence of this
track.
- frames (Sequence[google.cloud.videointelligence_v1.types.ObjectTrackingFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1.types.ObjectTrackingFrame]):
Information corresponding to all frames where
this object track appears. Non-streaming batch
mode: it may be one or multiple
@@ -1627,32 +1645,32 @@ class ObjectTrackingAnnotation(proto.Message):
Feature version.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=3,
oneof="track_info",
message="VideoSegment",
)
- track_id = proto.Field(
+ track_id: int = proto.Field(
proto.INT64,
number=5,
oneof="track_info",
)
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["ObjectTrackingFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ObjectTrackingFrame",
)
- version = proto.Field(
+ version: str = proto.Field(
proto.STRING,
number=6,
)
@@ -1667,28 +1685,28 @@ class LogoRecognitionAnnotation(proto.Message):
Entity category information to specify the
logo class that all the logo tracks within this
LogoRecognitionAnnotation are recognized as.
- tracks (Sequence[google.cloud.videointelligence_v1.types.Track]):
+ tracks (MutableSequence[google.cloud.videointelligence_v1.types.Track]):
All logo tracks where the recognized logo
appears. Each track corresponds to one logo
instance appearing in consecutive frames.
- segments (Sequence[google.cloud.videointelligence_v1.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1.types.VideoSegment]):
All video segments where the recognized logo
appears. There might be multiple instances of
the same logo class appearing in one
VideoSegment.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- tracks = proto.RepeatedField(
+ tracks: MutableSequence["Track"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Track",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="VideoSegment",
diff --git a/google/cloud/videointelligence_v1beta2/__init__.py b/google/cloud/videointelligence_v1beta2/__init__.py
index d2b9af45..d77da4ce 100644
--- a/google/cloud/videointelligence_v1beta2/__init__.py
+++ b/google/cloud/videointelligence_v1beta2/__init__.py
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.videointelligence import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py
index 66264a05..2b6382fd 100644
--- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py
+++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py
@@ -16,7 +16,17 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
@@ -164,9 +174,9 @@ def transport(self) -> VideoIntelligenceServiceTransport:
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -210,12 +220,12 @@ def __init__(
async def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -241,7 +251,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1beta2.AnnotateVideoRequest(
- features="FACE_DETECTION",
+ features=['FACE_DETECTION'],
)
# Make the request
@@ -255,7 +265,7 @@ async def sample_annotate_video():
print(response)
Args:
- request (Union[google.cloud.videointelligence_v1beta2.types.AnnotateVideoRequest, dict]):
+ request (Optional[Union[google.cloud.videointelligence_v1beta2.types.AnnotateVideoRequest, dict]]):
The request object. Video annotation request.
input_uri (:class:`str`):
Input video location. Currently, only `Google Cloud
@@ -276,7 +286,7 @@ async def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (:class:`Sequence[google.cloud.videointelligence_v1beta2.types.Feature]`):
+ features (:class:`MutableSequence[google.cloud.videointelligence_v1beta2.types.Feature]`):
Required. Requested video annotation
features.
diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py
index da9e0a62..1f240032 100644
--- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py
+++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py
@@ -16,7 +16,18 @@
from collections import OrderedDict
import os
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
@@ -60,7 +71,7 @@ class VideoIntelligenceServiceClientMeta(type):
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
@@ -313,8 +324,8 @@ def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[Union[str, VideoIntelligenceServiceTransport]] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -328,7 +339,7 @@ def __init__(
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -358,6 +369,7 @@ def __init__(
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
@@ -410,12 +422,12 @@ def __init__(
def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -441,7 +453,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1beta2.AnnotateVideoRequest(
- features="FACE_DETECTION",
+ features=['FACE_DETECTION'],
)
# Make the request
@@ -476,7 +488,7 @@ def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (Sequence[google.cloud.videointelligence_v1beta2.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1beta2.types.Feature]):
Required. Requested video annotation
features.
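
The added ``cast`` narrows ``client_options`` for static type checkers once the dict and None branches have run. A rough sketch of that normalization under the same assumptions (the helper name is hypothetical):

    from typing import Optional, Union, cast

    from google.api_core import client_options as client_options_lib

    def _coerce_client_options(
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]],
    ) -> client_options_lib.ClientOptions:
        # A plain dict is accepted for convenience and converted first.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Only ClientOptions can reach this point; the cast records that fact
        # for the type checker without any runtime effect.
        return cast(client_options_lib.ClientOptions, client_options)
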
diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py
index 923b93d7..0aa030b7 100644
--- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py
+++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py
@@ -49,7 +49,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py
index 1a4cc55c..92d94ff3 100644
--- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py
+++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py
@@ -47,14 +47,14 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[grpc.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -182,8 +182,8 @@ def __init__(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
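
Signatures such as ``credentials: ga_credentials.Credentials = None`` relied on implicit Optional, which current type checkers reject (mypy turned off implicit Optional by default in 0.990). The transport hunks spell the intent out; a minimal illustration:

    from typing import Optional

    # Before: implicit Optional, flagged under mypy's no-implicit-optional.
    #     def create_channel(credentials_file: str = None): ...

    # After: the annotation admits None, matching the default value.
    def create_channel(credentials_file: Optional[str] = None) -> None:
        ...
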
diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py
index 54cca119..65d431ac 100644
--- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py
+++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py
@@ -49,7 +49,7 @@ class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTrans
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -92,15 +92,15 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[aio.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1beta2/types/video_intelligence.py b/google/cloud/videointelligence_v1beta2/types/video_intelligence.py
index 70bdcc8b..23c1631e 100644
--- a/google/cloud/videointelligence_v1beta2/types/video_intelligence.py
+++ b/google/cloud/videointelligence_v1beta2/types/video_intelligence.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from typing import MutableMapping, MutableSequence
+
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
@@ -98,7 +100,7 @@ class AnnotateVideoRequest(proto.Message):
The video data bytes. If unset, the input video(s) should be
specified via ``input_uri``. If set, ``input_uri`` should be
unset.
- features (Sequence[google.cloud.videointelligence_v1beta2.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1beta2.types.Feature]):
Required. Requested video annotation
features.
video_context (google.cloud.videointelligence_v1beta2.types.VideoContext):
@@ -120,29 +122,29 @@ class AnnotateVideoRequest(proto.Message):
a region will be determined based on video file location.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- input_content = proto.Field(
+ input_content: bytes = proto.Field(
proto.BYTES,
number=6,
)
- features = proto.RepeatedField(
+ features: MutableSequence["Feature"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="Feature",
)
- video_context = proto.Field(
+ video_context: "VideoContext" = proto.Field(
proto.MESSAGE,
number=3,
message="VideoContext",
)
- output_uri = proto.Field(
+ output_uri: str = proto.Field(
proto.STRING,
number=4,
)
- location_id = proto.Field(
+ location_id: str = proto.Field(
proto.STRING,
number=5,
)
@@ -152,7 +154,7 @@ class VideoContext(proto.Message):
r"""Video context and/or feature-specific parameters.
Attributes:
- segments (Sequence[google.cloud.videointelligence_v1beta2.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1beta2.types.VideoSegment]):
Video segments to annotate. The segments may
overlap and are not required to be contiguous or
span the whole video. If unspecified, each video
@@ -167,27 +169,27 @@ class VideoContext(proto.Message):
Config for FACE_DETECTION.
"""
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- label_detection_config = proto.Field(
+ label_detection_config: "LabelDetectionConfig" = proto.Field(
proto.MESSAGE,
number=2,
message="LabelDetectionConfig",
)
- shot_change_detection_config = proto.Field(
+ shot_change_detection_config: "ShotChangeDetectionConfig" = proto.Field(
proto.MESSAGE,
number=3,
message="ShotChangeDetectionConfig",
)
- explicit_content_detection_config = proto.Field(
+ explicit_content_detection_config: "ExplicitContentDetectionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="ExplicitContentDetectionConfig",
)
- face_detection_config = proto.Field(
+ face_detection_config: "FaceDetectionConfig" = proto.Field(
proto.MESSAGE,
number=5,
message="FaceDetectionConfig",
@@ -213,16 +215,16 @@ class LabelDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- label_detection_mode = proto.Field(
+ label_detection_mode: "LabelDetectionMode" = proto.Field(
proto.ENUM,
number=1,
enum="LabelDetectionMode",
)
- stationary_camera = proto.Field(
+ stationary_camera: bool = proto.Field(
proto.BOOL,
number=2,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=3,
)
@@ -238,7 +240,7 @@ class ShotChangeDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -254,7 +256,7 @@ class ExplicitContentDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -273,11 +275,11 @@ class FaceDetectionConfig(proto.Message):
face annotation output.
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
- include_bounding_boxes = proto.Field(
+ include_bounding_boxes: bool = proto.Field(
proto.BOOL,
number=2,
)
@@ -297,12 +299,12 @@ class VideoSegment(proto.Message):
(inclusive).
"""
- start_time_offset = proto.Field(
+ start_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time_offset = proto.Field(
+ end_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -319,12 +321,12 @@ class LabelSegment(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -342,12 +344,12 @@ class LabelFrame(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -367,15 +369,15 @@ class Entity(proto.Message):
Language code for ``description`` in BCP-47 format.
"""
- entity_id = proto.Field(
+ entity_id: str = proto.Field(
proto.STRING,
number=1,
)
- description = proto.Field(
+ description: str = proto.Field(
proto.STRING,
number=2,
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=3,
)
@@ -387,34 +389,34 @@ class LabelAnnotation(proto.Message):
Attributes:
entity (google.cloud.videointelligence_v1beta2.types.Entity):
Detected entity.
- category_entities (Sequence[google.cloud.videointelligence_v1beta2.types.Entity]):
+ category_entities (MutableSequence[google.cloud.videointelligence_v1beta2.types.Entity]):
Common categories for the detected entity. E.g. when the
label is ``Terrier`` the category is likely ``dog``. And in
some cases there might be more than one category, e.g.
``Terrier`` could also be a ``pet``.
- segments (Sequence[google.cloud.videointelligence_v1beta2.types.LabelSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1beta2.types.LabelSegment]):
All video segments where a label was
detected.
- frames (Sequence[google.cloud.videointelligence_v1beta2.types.LabelFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1beta2.types.LabelFrame]):
All video frames where a label was detected.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- category_entities = proto.RepeatedField(
+ category_entities: MutableSequence["Entity"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Entity",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["LabelSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["LabelFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelFrame",
@@ -433,12 +435,12 @@ class ExplicitContentFrame(proto.Message):
Likelihood of the pornography content.
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- pornography_likelihood = proto.Field(
+ pornography_likelihood: "Likelihood" = proto.Field(
proto.ENUM,
number=2,
enum="Likelihood",
@@ -451,12 +453,12 @@ class ExplicitContentAnnotation(proto.Message):
frame, no annotations are present for that frame.
Attributes:
- frames (Sequence[google.cloud.videointelligence_v1beta2.types.ExplicitContentFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1beta2.types.ExplicitContentFrame]):
All video frames where explicit content was
detected.
"""
- frames = proto.RepeatedField(
+ frames: MutableSequence["ExplicitContentFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="ExplicitContentFrame",
@@ -478,19 +480,19 @@ class NormalizedBoundingBox(proto.Message):
Bottom Y coordinate.
"""
- left = proto.Field(
+ left: float = proto.Field(
proto.FLOAT,
number=1,
)
- top = proto.Field(
+ top: float = proto.Field(
proto.FLOAT,
number=2,
)
- right = proto.Field(
+ right: float = proto.Field(
proto.FLOAT,
number=3,
)
- bottom = proto.Field(
+ bottom: float = proto.Field(
proto.FLOAT,
number=4,
)
@@ -504,7 +506,7 @@ class FaceSegment(proto.Message):
Video segment where a face was detected.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
@@ -515,7 +517,7 @@ class FaceFrame(proto.Message):
r"""Video frame level annotation results for face detection.
Attributes:
- normalized_bounding_boxes (Sequence[google.cloud.videointelligence_v1beta2.types.NormalizedBoundingBox]):
+ normalized_bounding_boxes (MutableSequence[google.cloud.videointelligence_v1beta2.types.NormalizedBoundingBox]):
Normalized Bounding boxes in a frame.
There can be more than one boxes if the same
face is detected in multiple locations within
@@ -526,12 +528,14 @@ class FaceFrame(proto.Message):
location.
"""
- normalized_bounding_boxes = proto.RepeatedField(
+ normalized_bounding_boxes: MutableSequence[
+ "NormalizedBoundingBox"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -545,22 +549,22 @@ class FaceAnnotation(proto.Message):
thumbnail (bytes):
Thumbnail of a representative face view (in
JPEG format).
- segments (Sequence[google.cloud.videointelligence_v1beta2.types.FaceSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1beta2.types.FaceSegment]):
All video segments where a face was detected.
- frames (Sequence[google.cloud.videointelligence_v1beta2.types.FaceFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1beta2.types.FaceFrame]):
All video frames where a face was detected.
"""
- thumbnail = proto.Field(
+ thumbnail: bytes = proto.Field(
proto.BYTES,
number=1,
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["FaceSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="FaceSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["FaceFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="FaceFrame",
@@ -574,22 +578,22 @@ class VideoAnnotationResults(proto.Message):
input_uri (str):
Video file location in `Google Cloud
Storage <https://cloud.google.com/storage/>`__.
- segment_label_annotations (Sequence[google.cloud.videointelligence_v1beta2.types.LabelAnnotation]):
+ segment_label_annotations (MutableSequence[google.cloud.videointelligence_v1beta2.types.LabelAnnotation]):
Label annotations on video level or user
specified segment level. There is exactly one
element for each unique label.
- shot_label_annotations (Sequence[google.cloud.videointelligence_v1beta2.types.LabelAnnotation]):
+ shot_label_annotations (MutableSequence[google.cloud.videointelligence_v1beta2.types.LabelAnnotation]):
Label annotations on shot level.
There is exactly one element for each unique
label.
- frame_label_annotations (Sequence[google.cloud.videointelligence_v1beta2.types.LabelAnnotation]):
+ frame_label_annotations (MutableSequence[google.cloud.videointelligence_v1beta2.types.LabelAnnotation]):
Label annotations on frame level.
There is exactly one element for each unique
label.
- face_annotations (Sequence[google.cloud.videointelligence_v1beta2.types.FaceAnnotation]):
+ face_annotations (MutableSequence[google.cloud.videointelligence_v1beta2.types.FaceAnnotation]):
Face annotations. There is exactly one
element for each unique face.
- shot_annotations (Sequence[google.cloud.videointelligence_v1beta2.types.VideoSegment]):
+ shot_annotations (MutableSequence[google.cloud.videointelligence_v1beta2.types.VideoSegment]):
Shot annotations. Each shot is represented as
a video segment.
explicit_annotation (google.cloud.videointelligence_v1beta2.types.ExplicitContentAnnotation):
@@ -600,41 +604,41 @@ class VideoAnnotationResults(proto.Message):
may fail.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- segment_label_annotations = proto.RepeatedField(
+ segment_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="LabelAnnotation",
)
- shot_label_annotations = proto.RepeatedField(
+ shot_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelAnnotation",
)
- frame_label_annotations = proto.RepeatedField(
+ frame_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelAnnotation",
)
- face_annotations = proto.RepeatedField(
+ face_annotations: MutableSequence["FaceAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=5,
message="FaceAnnotation",
)
- shot_annotations = proto.RepeatedField(
+ shot_annotations: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VideoSegment",
)
- explicit_annotation = proto.Field(
+ explicit_annotation: "ExplicitContentAnnotation" = proto.Field(
proto.MESSAGE,
number=7,
message="ExplicitContentAnnotation",
)
- error = proto.Field(
+ error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=9,
message=status_pb2.Status,
@@ -647,12 +651,12 @@ class AnnotateVideoResponse(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_results (Sequence[google.cloud.videointelligence_v1beta2.types.VideoAnnotationResults]):
+ annotation_results (MutableSequence[google.cloud.videointelligence_v1beta2.types.VideoAnnotationResults]):
Annotation results for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_results = proto.RepeatedField(
+ annotation_results: MutableSequence["VideoAnnotationResults"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationResults",
@@ -675,20 +679,20 @@ class VideoAnnotationProgress(proto.Message):
Time of the most recent update.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- progress_percent = proto.Field(
+ progress_percent: int = proto.Field(
proto.INT32,
number=2,
)
- start_time = proto.Field(
+ start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- update_time = proto.Field(
+ update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
@@ -701,12 +705,14 @@ class AnnotateVideoProgress(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_progress (Sequence[google.cloud.videointelligence_v1beta2.types.VideoAnnotationProgress]):
+ annotation_progress (MutableSequence[google.cloud.videointelligence_v1beta2.types.VideoAnnotationProgress]):
Progress metadata for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_progress = proto.RepeatedField(
+ annotation_progress: MutableSequence[
+ "VideoAnnotationProgress"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationProgress",
diff --git a/google/cloud/videointelligence_v1p1beta1/__init__.py b/google/cloud/videointelligence_v1p1beta1/__init__.py
index 58ac4ee3..7ec3f37f 100644
--- a/google/cloud/videointelligence_v1p1beta1/__init__.py
+++ b/google/cloud/videointelligence_v1p1beta1/__init__.py
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.videointelligence import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py
index 6d8b157b..589b921a 100644
--- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py
+++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py
@@ -16,7 +16,17 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
@@ -164,9 +174,9 @@ def transport(self) -> VideoIntelligenceServiceTransport:
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -210,12 +220,12 @@ def __init__(
async def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -241,7 +251,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p1beta1.AnnotateVideoRequest(
- features="SPEECH_TRANSCRIPTION",
+ features=['SPEECH_TRANSCRIPTION'],
)
# Make the request
@@ -255,7 +265,7 @@ async def sample_annotate_video():
print(response)
Args:
- request (Union[google.cloud.videointelligence_v1p1beta1.types.AnnotateVideoRequest, dict]):
+ request (Optional[Union[google.cloud.videointelligence_v1p1beta1.types.AnnotateVideoRequest, dict]]):
The request object. Video annotation request.
input_uri (:class:`str`):
Input video location. Currently, only `Google Cloud
@@ -276,7 +286,7 @@ async def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (:class:`Sequence[google.cloud.videointelligence_v1p1beta1.types.Feature]`):
+ features (:class:`MutableSequence[google.cloud.videointelligence_v1p1beta1.types.Feature]`):
Required. Requested video annotation
features.
diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py
index 2ff2801f..6b71a724 100644
--- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py
+++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py
@@ -16,7 +16,18 @@
from collections import OrderedDict
import os
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
@@ -60,7 +71,7 @@ class VideoIntelligenceServiceClientMeta(type):
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
@@ -313,8 +324,8 @@ def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[Union[str, VideoIntelligenceServiceTransport]] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -328,7 +339,7 @@ def __init__(
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -358,6 +369,7 @@ def __init__(
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
@@ -410,12 +422,12 @@ def __init__(
def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -441,7 +453,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p1beta1.AnnotateVideoRequest(
- features="SPEECH_TRANSCRIPTION",
+ features=['SPEECH_TRANSCRIPTION'],
)
# Make the request
@@ -476,7 +488,7 @@ def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (Sequence[google.cloud.videointelligence_v1p1beta1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.Feature]):
Required. Requested video annotation
features.
diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py
index afbfe30c..234be53c 100644
--- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py
+++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py
@@ -49,7 +49,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py
index bf467dd5..f36834f8 100644
--- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py
+++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py
@@ -47,14 +47,14 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[grpc.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -182,8 +182,8 @@ def __init__(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py
index 15b98626..5bc81987 100644
--- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py
+++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py
@@ -49,7 +49,7 @@ class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTrans
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -92,15 +92,15 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[aio.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py b/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py
index 1bf5ce56..362bb0f8 100644
--- a/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py
+++ b/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from typing import MutableMapping, MutableSequence
+
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
@@ -98,7 +100,7 @@ class AnnotateVideoRequest(proto.Message):
The video data bytes. If unset, the input video(s) should be
specified via ``input_uri``. If set, ``input_uri`` should be
unset.
- features (Sequence[google.cloud.videointelligence_v1p1beta1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.Feature]):
Required. Requested video annotation
features.
video_context (google.cloud.videointelligence_v1p1beta1.types.VideoContext):
@@ -120,29 +122,29 @@ class AnnotateVideoRequest(proto.Message):
a region will be determined based on video file location.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- input_content = proto.Field(
+ input_content: bytes = proto.Field(
proto.BYTES,
number=6,
)
- features = proto.RepeatedField(
+ features: MutableSequence["Feature"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="Feature",
)
- video_context = proto.Field(
+ video_context: "VideoContext" = proto.Field(
proto.MESSAGE,
number=3,
message="VideoContext",
)
- output_uri = proto.Field(
+ output_uri: str = proto.Field(
proto.STRING,
number=4,
)
- location_id = proto.Field(
+ location_id: str = proto.Field(
proto.STRING,
number=5,
)
@@ -152,7 +154,7 @@ class VideoContext(proto.Message):
r"""Video context and/or feature-specific parameters.
Attributes:
- segments (Sequence[google.cloud.videointelligence_v1p1beta1.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.VideoSegment]):
Video segments to annotate. The segments may
overlap and are not required to be contiguous or
span the whole video. If unspecified, each video
@@ -167,27 +169,27 @@ class VideoContext(proto.Message):
Config for SPEECH_TRANSCRIPTION.
"""
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- label_detection_config = proto.Field(
+ label_detection_config: "LabelDetectionConfig" = proto.Field(
proto.MESSAGE,
number=2,
message="LabelDetectionConfig",
)
- shot_change_detection_config = proto.Field(
+ shot_change_detection_config: "ShotChangeDetectionConfig" = proto.Field(
proto.MESSAGE,
number=3,
message="ShotChangeDetectionConfig",
)
- explicit_content_detection_config = proto.Field(
+ explicit_content_detection_config: "ExplicitContentDetectionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="ExplicitContentDetectionConfig",
)
- speech_transcription_config = proto.Field(
+ speech_transcription_config: "SpeechTranscriptionConfig" = proto.Field(
proto.MESSAGE,
number=6,
message="SpeechTranscriptionConfig",
@@ -213,16 +215,16 @@ class LabelDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- label_detection_mode = proto.Field(
+ label_detection_mode: "LabelDetectionMode" = proto.Field(
proto.ENUM,
number=1,
enum="LabelDetectionMode",
)
- stationary_camera = proto.Field(
+ stationary_camera: bool = proto.Field(
proto.BOOL,
number=2,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=3,
)
@@ -238,7 +240,7 @@ class ShotChangeDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -254,7 +256,7 @@ class ExplicitContentDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -274,12 +276,12 @@ class VideoSegment(proto.Message):
(inclusive).
"""
- start_time_offset = proto.Field(
+ start_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time_offset = proto.Field(
+ end_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -296,12 +298,12 @@ class LabelSegment(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -319,12 +321,12 @@ class LabelFrame(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -344,15 +346,15 @@ class Entity(proto.Message):
Language code for ``description`` in BCP-47 format.
"""
- entity_id = proto.Field(
+ entity_id: str = proto.Field(
proto.STRING,
number=1,
)
- description = proto.Field(
+ description: str = proto.Field(
proto.STRING,
number=2,
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=3,
)
@@ -364,34 +366,34 @@ class LabelAnnotation(proto.Message):
Attributes:
entity (google.cloud.videointelligence_v1p1beta1.types.Entity):
Detected entity.
- category_entities (Sequence[google.cloud.videointelligence_v1p1beta1.types.Entity]):
+ category_entities (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.Entity]):
Common categories for the detected entity. E.g. when the
label is ``Terrier`` the category is likely ``dog``. And in
some cases there might be more than one category, e.g.
``Terrier`` could also be a ``pet``.
- segments (Sequence[google.cloud.videointelligence_v1p1beta1.types.LabelSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.LabelSegment]):
All video segments where a label was
detected.
- frames (Sequence[google.cloud.videointelligence_v1p1beta1.types.LabelFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.LabelFrame]):
All video frames where a label was detected.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- category_entities = proto.RepeatedField(
+ category_entities: MutableSequence["Entity"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Entity",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["LabelSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["LabelFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelFrame",
@@ -410,12 +412,12 @@ class ExplicitContentFrame(proto.Message):
Likelihood of the pornography content.
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- pornography_likelihood = proto.Field(
+ pornography_likelihood: "Likelihood" = proto.Field(
proto.ENUM,
number=2,
enum="Likelihood",
@@ -428,12 +430,12 @@ class ExplicitContentAnnotation(proto.Message):
frame, no annotations are present for that frame.
Attributes:
- frames (Sequence[google.cloud.videointelligence_v1p1beta1.types.ExplicitContentFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.ExplicitContentFrame]):
All video frames where explicit content was
detected.
"""
- frames = proto.RepeatedField(
+ frames: MutableSequence["ExplicitContentFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="ExplicitContentFrame",
@@ -447,24 +449,24 @@ class VideoAnnotationResults(proto.Message):
input_uri (str):
Output only. Video file location in `Google Cloud
Storage <https://cloud.google.com/storage/>`__.
- segment_label_annotations (Sequence[google.cloud.videointelligence_v1p1beta1.types.LabelAnnotation]):
+ segment_label_annotations (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.LabelAnnotation]):
Label annotations on video level or user
specified segment level. There is exactly one
element for each unique label.
- shot_label_annotations (Sequence[google.cloud.videointelligence_v1p1beta1.types.LabelAnnotation]):
+ shot_label_annotations (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.LabelAnnotation]):
Label annotations on shot level.
There is exactly one element for each unique
label.
- frame_label_annotations (Sequence[google.cloud.videointelligence_v1p1beta1.types.LabelAnnotation]):
+ frame_label_annotations (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.LabelAnnotation]):
Label annotations on frame level.
There is exactly one element for each unique
label.
- shot_annotations (Sequence[google.cloud.videointelligence_v1p1beta1.types.VideoSegment]):
+ shot_annotations (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.VideoSegment]):
Shot annotations. Each shot is represented as
a video segment.
explicit_annotation (google.cloud.videointelligence_v1p1beta1.types.ExplicitContentAnnotation):
Explicit content annotation.
- speech_transcriptions (Sequence[google.cloud.videointelligence_v1p1beta1.types.SpeechTranscription]):
+ speech_transcriptions (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.SpeechTranscription]):
Speech transcription.
error (google.rpc.status_pb2.Status):
Output only. If set, indicates an error. Note that for a
@@ -472,41 +474,41 @@ class VideoAnnotationResults(proto.Message):
some may fail.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- segment_label_annotations = proto.RepeatedField(
+ segment_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="LabelAnnotation",
)
- shot_label_annotations = proto.RepeatedField(
+ shot_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelAnnotation",
)
- frame_label_annotations = proto.RepeatedField(
+ frame_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelAnnotation",
)
- shot_annotations = proto.RepeatedField(
+ shot_annotations: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VideoSegment",
)
- explicit_annotation = proto.Field(
+ explicit_annotation: "ExplicitContentAnnotation" = proto.Field(
proto.MESSAGE,
number=7,
message="ExplicitContentAnnotation",
)
- speech_transcriptions = proto.RepeatedField(
+ speech_transcriptions: MutableSequence["SpeechTranscription"] = proto.RepeatedField(
proto.MESSAGE,
number=11,
message="SpeechTranscription",
)
- error = proto.Field(
+ error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=9,
message=status_pb2.Status,
@@ -519,12 +521,12 @@ class AnnotateVideoResponse(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_results (Sequence[google.cloud.videointelligence_v1p1beta1.types.VideoAnnotationResults]):
+ annotation_results (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.VideoAnnotationResults]):
Annotation results for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_results = proto.RepeatedField(
+ annotation_results: MutableSequence["VideoAnnotationResults"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationResults",
@@ -549,20 +551,20 @@ class VideoAnnotationProgress(proto.Message):
Output only. Time of the most recent update.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- progress_percent = proto.Field(
+ progress_percent: int = proto.Field(
proto.INT32,
number=2,
)
- start_time = proto.Field(
+ start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- update_time = proto.Field(
+ update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
@@ -575,12 +577,14 @@ class AnnotateVideoProgress(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_progress (Sequence[google.cloud.videointelligence_v1p1beta1.types.VideoAnnotationProgress]):
+ annotation_progress (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.VideoAnnotationProgress]):
Progress metadata for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_progress = proto.RepeatedField(
+ annotation_progress: MutableSequence[
+ "VideoAnnotationProgress"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationProgress",
@@ -611,7 +615,7 @@ class SpeechTranscriptionConfig(proto.Message):
character in each filtered word with asterisks, e.g. "f***".
If set to ``false`` or omitted, profanities won't be
filtered out.
- speech_contexts (Sequence[google.cloud.videointelligence_v1p1beta1.types.SpeechContext]):
+ speech_contexts (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.SpeechContext]):
Optional. A means to provide context to
assist the speech recognition.
enable_automatic_punctuation (bool):
@@ -625,34 +629,34 @@ class SpeechTranscriptionConfig(proto.Message):
complimentary to all users. In the future this
may be exclusively available as a premium
feature.".
- audio_tracks (Sequence[int]):
+ audio_tracks (MutableSequence[int]):
Optional. For file formats, such as MXF or
MKV, supporting multiple audio tracks, specify
up to two tracks. Default: track 0.
"""
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=1,
)
- max_alternatives = proto.Field(
+ max_alternatives: int = proto.Field(
proto.INT32,
number=2,
)
- filter_profanity = proto.Field(
+ filter_profanity: bool = proto.Field(
proto.BOOL,
number=3,
)
- speech_contexts = proto.RepeatedField(
+ speech_contexts: MutableSequence["SpeechContext"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="SpeechContext",
)
- enable_automatic_punctuation = proto.Field(
+ enable_automatic_punctuation: bool = proto.Field(
proto.BOOL,
number=5,
)
- audio_tracks = proto.RepeatedField(
+ audio_tracks: MutableSequence[int] = proto.RepeatedField(
proto.INT32,
number=6,
)
@@ -663,7 +667,7 @@ class SpeechContext(proto.Message):
words and phrases in the results.
Attributes:
- phrases (Sequence[str]):
+ phrases (MutableSequence[str]):
Optional. A list of strings containing words and phrases
"hints" so that the speech recognition is more likely to
recognize them. This can be used to improve the accuracy for
@@ -674,7 +678,7 @@ class SpeechContext(proto.Message):
limits `__.
"""
- phrases = proto.RepeatedField(
+ phrases: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
@@ -685,7 +689,7 @@ class SpeechTranscription(proto.Message):
audio.
Attributes:
- alternatives (Sequence[google.cloud.videointelligence_v1p1beta1.types.SpeechRecognitionAlternative]):
+ alternatives (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.SpeechRecognitionAlternative]):
May contain one or more recognition hypotheses (up to the
maximum specified in ``max_alternatives``). These
alternatives are ordered in terms of accuracy, with the top
@@ -693,7 +697,7 @@ class SpeechTranscription(proto.Message):
the recognizer.
"""
- alternatives = proto.RepeatedField(
+ alternatives: MutableSequence["SpeechRecognitionAlternative"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="SpeechRecognitionAlternative",
@@ -715,20 +719,20 @@ class SpeechRecognitionAlternative(proto.Message):
accurate and users should not rely on it to be always
provided. The default of 0.0 is a sentinel value indicating
``confidence`` was not set.
- words (Sequence[google.cloud.videointelligence_v1p1beta1.types.WordInfo]):
+ words (MutableSequence[google.cloud.videointelligence_v1p1beta1.types.WordInfo]):
Output only. A list of word-specific
information for each recognized word.
"""
- transcript = proto.Field(
+ transcript: str = proto.Field(
proto.STRING,
number=1,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- words = proto.RepeatedField(
+ words: MutableSequence["WordInfo"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="WordInfo",
@@ -758,17 +762,17 @@ class WordInfo(proto.Message):
set of information.
"""
- start_time = proto.Field(
+ start_time: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time = proto.Field(
+ end_time: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
)
- word = proto.Field(
+ word: str = proto.Field(
proto.STRING,
number=3,
)
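
The v1p1beta1 surface carries the speech-transcription options annotated above. A hedged sketch wiring them into a request through ``VideoContext`` (the URI and phrases are placeholders):

    from google.cloud import videointelligence_v1p1beta1 as vi

    config = vi.SpeechTranscriptionConfig(
        language_code="en-US",
        enable_automatic_punctuation=True,
        speech_contexts=[vi.SpeechContext(phrases=["Video Intelligence"])],
        audio_tracks=[0],  # default audio track
    )
    request = vi.AnnotateVideoRequest(
        input_uri="gs://my-bucket/my-video.mp4",  # hypothetical URI
        features=[vi.Feature.SPEECH_TRANSCRIPTION],
        video_context=vi.VideoContext(speech_transcription_config=config),
    )
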
diff --git a/google/cloud/videointelligence_v1p2beta1/__init__.py b/google/cloud/videointelligence_v1p2beta1/__init__.py
index afb610ea..5bd373a3 100644
--- a/google/cloud/videointelligence_v1p2beta1/__init__.py
+++ b/google/cloud/videointelligence_v1p2beta1/__init__.py
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.videointelligence import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py
index 6f0e938d..b135161e 100644
--- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py
+++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py
@@ -16,7 +16,17 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
@@ -164,9 +174,9 @@ def transport(self) -> VideoIntelligenceServiceTransport:
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -210,12 +220,12 @@ def __init__(
async def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -241,7 +251,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p2beta1.AnnotateVideoRequest(
- features="OBJECT_TRACKING",
+ features=['OBJECT_TRACKING'],
)
# Make the request
@@ -255,7 +265,7 @@ async def sample_annotate_video():
print(response)
Args:
- request (Union[google.cloud.videointelligence_v1p2beta1.types.AnnotateVideoRequest, dict]):
+ request (Optional[Union[google.cloud.videointelligence_v1p2beta1.types.AnnotateVideoRequest, dict]]):
The request object. Video annotation request.
input_uri (:class:`str`):
Input video location. Currently, only `Google Cloud
@@ -276,7 +286,7 @@ async def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (:class:`Sequence[google.cloud.videointelligence_v1p2beta1.types.Feature]`):
+ features (:class:`MutableSequence[google.cloud.videointelligence_v1p2beta1.types.Feature]`):
Required. Requested video annotation
features.
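
For the OBJECT_TRACKING sample above, the operation response carries one ``VideoAnnotationResults`` entry per input video. A brief sketch of consuming it with the synchronous client (placeholder URI; assumes the v1p2beta1 results expose ``object_annotations``):

    from google.cloud import videointelligence_v1p2beta1 as vi

    client = vi.VideoIntelligenceServiceClient()
    operation = client.annotate_video(
        request=vi.AnnotateVideoRequest(
            input_uri="gs://my-bucket/my-video.mp4",  # hypothetical URI
            features=[vi.Feature.OBJECT_TRACKING],
        )
    )
    response = operation.result(timeout=600)
    for result in response.annotation_results:
        for annotation in result.object_annotations:
            print(annotation.entity.description, annotation.confidence)
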
diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py
index 8828c996..da054ee0 100644
--- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py
+++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py
@@ -16,7 +16,18 @@
from collections import OrderedDict
import os
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
@@ -60,7 +71,7 @@ class VideoIntelligenceServiceClientMeta(type):
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
@@ -313,8 +324,8 @@ def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[Union[str, VideoIntelligenceServiceTransport]] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -328,7 +339,7 @@ def __init__(
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -358,6 +369,7 @@ def __init__(
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
@@ -410,12 +422,12 @@ def __init__(
def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -441,7 +453,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p2beta1.AnnotateVideoRequest(
- features="OBJECT_TRACKING",
+ features=['OBJECT_TRACKING'],
)
# Make the request
@@ -476,7 +488,7 @@ def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (Sequence[google.cloud.videointelligence_v1p2beta1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.Feature]):
Required. Requested video annotation
features.
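
The widened ``client_options`` annotation above documents behavior the constructor already had at runtime (note the existing ``client_options_lib.from_dict`` branch); the added ``cast`` simply keeps type checkers satisfied afterwards. A small sketch of the two spellings the hint now covers, using the default endpoint as a placeholder value:

from google.api_core.client_options import ClientOptions
from google.cloud import videointelligence_v1p2beta1

# A ClientOptions instance and a plain dict are both valid per the new hint.
client_a = videointelligence_v1p2beta1.VideoIntelligenceServiceClient(
    client_options=ClientOptions(api_endpoint="videointelligence.googleapis.com")
)
client_b = videointelligence_v1p2beta1.VideoIntelligenceServiceClient(
    client_options={"api_endpoint": "videointelligence.googleapis.com"}
)
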
diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py
index cdc6a606..00f2738e 100644
--- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py
+++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py
@@ -49,7 +49,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py
index 77fac597..3b8c3952 100644
--- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py
+++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py
@@ -47,14 +47,14 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[grpc.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -182,8 +182,8 @@ def __init__(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
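
Most of the transport changes in this file follow one pattern: under PEP 484, a default of ``None`` no longer implies an optional type, so ``x: T = None`` becomes ``x: Optional[T] = None``. A minimal illustration (not library code) of what strict checkers such as mypy in no-implicit-optional mode reject versus accept:

from typing import Optional


def old_style(credentials_file: str = None) -> None:
    # Flagged by strict type checkers: None is not a str.
    ...


def new_style(credentials_file: Optional[str] = None) -> None:
    # Explicitly optional; accepted.
    ...
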
diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py
index 539000b1..3215416e 100644
--- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py
+++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py
@@ -49,7 +49,7 @@ class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTrans
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -92,15 +92,15 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[aio.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py b/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py
index 1f797f15..801e4909 100644
--- a/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py
+++ b/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from typing import MutableMapping, MutableSequence
+
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
@@ -103,7 +105,7 @@ class AnnotateVideoRequest(proto.Message):
The video data bytes. If unset, the input video(s) should be
specified via ``input_uri``. If set, ``input_uri`` should be
unset.
- features (Sequence[google.cloud.videointelligence_v1p2beta1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.Feature]):
Required. Requested video annotation
features.
video_context (google.cloud.videointelligence_v1p2beta1.types.VideoContext):
@@ -125,29 +127,29 @@ class AnnotateVideoRequest(proto.Message):
a region will be determined based on video file location.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- input_content = proto.Field(
+ input_content: bytes = proto.Field(
proto.BYTES,
number=6,
)
- features = proto.RepeatedField(
+ features: MutableSequence["Feature"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="Feature",
)
- video_context = proto.Field(
+ video_context: "VideoContext" = proto.Field(
proto.MESSAGE,
number=3,
message="VideoContext",
)
- output_uri = proto.Field(
+ output_uri: str = proto.Field(
proto.STRING,
number=4,
)
- location_id = proto.Field(
+ location_id: str = proto.Field(
proto.STRING,
number=5,
)
@@ -157,7 +159,7 @@ class VideoContext(proto.Message):
r"""Video context and/or feature-specific parameters.
Attributes:
- segments (Sequence[google.cloud.videointelligence_v1p2beta1.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.VideoSegment]):
Video segments to annotate. The segments may
overlap and are not required to be contiguous or
span the whole video. If unspecified, each video
@@ -172,27 +174,27 @@ class VideoContext(proto.Message):
Config for TEXT_DETECTION.
"""
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- label_detection_config = proto.Field(
+ label_detection_config: "LabelDetectionConfig" = proto.Field(
proto.MESSAGE,
number=2,
message="LabelDetectionConfig",
)
- shot_change_detection_config = proto.Field(
+ shot_change_detection_config: "ShotChangeDetectionConfig" = proto.Field(
proto.MESSAGE,
number=3,
message="ShotChangeDetectionConfig",
)
- explicit_content_detection_config = proto.Field(
+ explicit_content_detection_config: "ExplicitContentDetectionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="ExplicitContentDetectionConfig",
)
- text_detection_config = proto.Field(
+ text_detection_config: "TextDetectionConfig" = proto.Field(
proto.MESSAGE,
number=8,
message="TextDetectionConfig",
@@ -218,16 +220,16 @@ class LabelDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- label_detection_mode = proto.Field(
+ label_detection_mode: "LabelDetectionMode" = proto.Field(
proto.ENUM,
number=1,
enum="LabelDetectionMode",
)
- stationary_camera = proto.Field(
+ stationary_camera: bool = proto.Field(
proto.BOOL,
number=2,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=3,
)
@@ -243,7 +245,7 @@ class ShotChangeDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -259,7 +261,7 @@ class ExplicitContentDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -269,7 +271,7 @@ class TextDetectionConfig(proto.Message):
r"""Config for TEXT_DETECTION.
Attributes:
- language_hints (Sequence[str]):
+ language_hints (MutableSequence[str]):
A language hint can be specified if the
language to be detected is known a priori. It
can increase the accuracy of the detection.
@@ -280,7 +282,7 @@ class TextDetectionConfig(proto.Message):
hint is provided.
"""
- language_hints = proto.RepeatedField(
+ language_hints: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
@@ -300,12 +302,12 @@ class VideoSegment(proto.Message):
(inclusive).
"""
- start_time_offset = proto.Field(
+ start_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time_offset = proto.Field(
+ end_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -322,12 +324,12 @@ class LabelSegment(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -345,12 +347,12 @@ class LabelFrame(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -370,15 +372,15 @@ class Entity(proto.Message):
Language code for ``description`` in BCP-47 format.
"""
- entity_id = proto.Field(
+ entity_id: str = proto.Field(
proto.STRING,
number=1,
)
- description = proto.Field(
+ description: str = proto.Field(
proto.STRING,
number=2,
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=3,
)
@@ -390,34 +392,34 @@ class LabelAnnotation(proto.Message):
Attributes:
entity (google.cloud.videointelligence_v1p2beta1.types.Entity):
Detected entity.
- category_entities (Sequence[google.cloud.videointelligence_v1p2beta1.types.Entity]):
+ category_entities (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.Entity]):
Common categories for the detected entity. E.g., when the
label is ``Terrier``, the category is likely ``dog``. And in
some cases there might be more than one category, e.g.,
``Terrier`` could also be a ``pet``.
- segments (Sequence[google.cloud.videointelligence_v1p2beta1.types.LabelSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.LabelSegment]):
All video segments where a label was
detected.
- frames (Sequence[google.cloud.videointelligence_v1p2beta1.types.LabelFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.LabelFrame]):
All video frames where a label was detected.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- category_entities = proto.RepeatedField(
+ category_entities: MutableSequence["Entity"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Entity",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["LabelSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["LabelFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelFrame",
@@ -436,12 +438,12 @@ class ExplicitContentFrame(proto.Message):
Likelihood of the pornography content.
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- pornography_likelihood = proto.Field(
+ pornography_likelihood: "Likelihood" = proto.Field(
proto.ENUM,
number=2,
enum="Likelihood",
@@ -454,12 +456,12 @@ class ExplicitContentAnnotation(proto.Message):
frame, no annotations are present for that frame.
Attributes:
- frames (Sequence[google.cloud.videointelligence_v1p2beta1.types.ExplicitContentFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.ExplicitContentFrame]):
All video frames where explicit content was
detected.
"""
- frames = proto.RepeatedField(
+ frames: MutableSequence["ExplicitContentFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="ExplicitContentFrame",
@@ -481,19 +483,19 @@ class NormalizedBoundingBox(proto.Message):
Bottom Y coordinate.
"""
- left = proto.Field(
+ left: float = proto.Field(
proto.FLOAT,
number=1,
)
- top = proto.Field(
+ top: float = proto.Field(
proto.FLOAT,
number=2,
)
- right = proto.Field(
+ right: float = proto.Field(
proto.FLOAT,
number=3,
)
- bottom = proto.Field(
+ bottom: float = proto.Field(
proto.FLOAT,
number=4,
)
@@ -506,29 +508,29 @@ class VideoAnnotationResults(proto.Message):
input_uri (str):
Video file location in `Google Cloud
Storage <https://cloud.google.com/storage/>`__.
- segment_label_annotations (Sequence[google.cloud.videointelligence_v1p2beta1.types.LabelAnnotation]):
+ segment_label_annotations (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.LabelAnnotation]):
Label annotations on video level or user
specified segment level. There is exactly one
element for each unique label.
- shot_label_annotations (Sequence[google.cloud.videointelligence_v1p2beta1.types.LabelAnnotation]):
+ shot_label_annotations (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.LabelAnnotation]):
Label annotations on shot level.
There is exactly one element for each unique
label.
- frame_label_annotations (Sequence[google.cloud.videointelligence_v1p2beta1.types.LabelAnnotation]):
+ frame_label_annotations (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.LabelAnnotation]):
Label annotations on frame level.
There is exactly one element for each unique
label.
- shot_annotations (Sequence[google.cloud.videointelligence_v1p2beta1.types.VideoSegment]):
+ shot_annotations (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.VideoSegment]):
Shot annotations. Each shot is represented as
a video segment.
explicit_annotation (google.cloud.videointelligence_v1p2beta1.types.ExplicitContentAnnotation):
Explicit content annotation.
- text_annotations (Sequence[google.cloud.videointelligence_v1p2beta1.types.TextAnnotation]):
+ text_annotations (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.TextAnnotation]):
OCR text detection and tracking.
Annotations for list of detected text snippets.
Each will have list of frame information
associated with it.
- object_annotations (Sequence[google.cloud.videointelligence_v1p2beta1.types.ObjectTrackingAnnotation]):
+ object_annotations (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.ObjectTrackingAnnotation]):
Annotations for list of objects detected and
tracked in video.
error (google.rpc.status_pb2.Status):
@@ -537,46 +539,48 @@ class VideoAnnotationResults(proto.Message):
may fail.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- segment_label_annotations = proto.RepeatedField(
+ segment_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="LabelAnnotation",
)
- shot_label_annotations = proto.RepeatedField(
+ shot_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelAnnotation",
)
- frame_label_annotations = proto.RepeatedField(
+ frame_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelAnnotation",
)
- shot_annotations = proto.RepeatedField(
+ shot_annotations: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VideoSegment",
)
- explicit_annotation = proto.Field(
+ explicit_annotation: "ExplicitContentAnnotation" = proto.Field(
proto.MESSAGE,
number=7,
message="ExplicitContentAnnotation",
)
- text_annotations = proto.RepeatedField(
+ text_annotations: MutableSequence["TextAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=12,
message="TextAnnotation",
)
- object_annotations = proto.RepeatedField(
+ object_annotations: MutableSequence[
+ "ObjectTrackingAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=14,
message="ObjectTrackingAnnotation",
)
- error = proto.Field(
+ error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=9,
message=status_pb2.Status,
@@ -589,12 +593,12 @@ class AnnotateVideoResponse(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_results (Sequence[google.cloud.videointelligence_v1p2beta1.types.VideoAnnotationResults]):
+ annotation_results (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.VideoAnnotationResults]):
Annotation results for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_results = proto.RepeatedField(
+ annotation_results: MutableSequence["VideoAnnotationResults"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationResults",
@@ -617,20 +621,20 @@ class VideoAnnotationProgress(proto.Message):
Time of the most recent update.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- progress_percent = proto.Field(
+ progress_percent: int = proto.Field(
proto.INT32,
number=2,
)
- start_time = proto.Field(
+ start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- update_time = proto.Field(
+ update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
@@ -643,12 +647,14 @@ class AnnotateVideoProgress(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_progress (Sequence[google.cloud.videointelligence_v1p2beta1.types.VideoAnnotationProgress]):
+ annotation_progress (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.VideoAnnotationProgress]):
Progress metadata for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_progress = proto.RepeatedField(
+ annotation_progress: MutableSequence[
+ "VideoAnnotationProgress"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationProgress",
@@ -667,11 +673,11 @@ class NormalizedVertex(proto.Message):
Y coordinate.
"""
- x = proto.Field(
+ x: float = proto.Field(
proto.FLOAT,
number=1,
)
- y = proto.Field(
+ y: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -692,11 +698,11 @@ class NormalizedBoundingPoly(proto.Message):
calculations for location of the box.
Attributes:
- vertices (Sequence[google.cloud.videointelligence_v1p2beta1.types.NormalizedVertex]):
+ vertices (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.NormalizedVertex]):
Normalized vertices of the bounding polygon.
"""
- vertices = proto.RepeatedField(
+ vertices: MutableSequence["NormalizedVertex"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="NormalizedVertex",
@@ -714,21 +720,21 @@ class TextSegment(proto.Message):
Confidence for the track of detected text. It
is calculated as the highest over all frames
where OCR detected text appears.
- frames (Sequence[google.cloud.videointelligence_v1p2beta1.types.TextFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.TextFrame]):
Information related to the frames where OCR
detected text appears.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["TextFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="TextFrame",
@@ -748,12 +754,12 @@ class TextFrame(proto.Message):
Timestamp of this frame.
"""
- rotated_bounding_box = proto.Field(
+ rotated_bounding_box: "NormalizedBoundingPoly" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingPoly",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -768,16 +774,16 @@ class TextAnnotation(proto.Message):
Attributes:
text (str):
The detected text.
- segments (Sequence[google.cloud.videointelligence_v1p2beta1.types.TextSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.TextSegment]):
All video segments where OCR detected text
appears.
"""
- text = proto.Field(
+ text: str = proto.Field(
proto.STRING,
number=1,
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["TextSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="TextSegment",
@@ -797,12 +803,12 @@ class ObjectTrackingFrame(proto.Message):
The timestamp of the frame in microseconds.
"""
- normalized_bounding_box = proto.Field(
+ normalized_bounding_box: "NormalizedBoundingBox" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -841,32 +847,32 @@ class ObjectTrackingAnnotation(proto.Message):
confidence (float):
Object category's labeling confidence of this
track.
- frames (Sequence[google.cloud.videointelligence_v1p2beta1.types.ObjectTrackingFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p2beta1.types.ObjectTrackingFrame]):
Information corresponding to all frames where
this object track appears.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=3,
oneof="track_info",
message="VideoSegment",
)
- track_id = proto.Field(
+ track_id: int = proto.Field(
proto.INT64,
number=5,
oneof="track_info",
)
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["ObjectTrackingFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ObjectTrackingFrame",
diff --git a/google/cloud/videointelligence_v1p3beta1/__init__.py b/google/cloud/videointelligence_v1p3beta1/__init__.py
index a5025cdf..b94e29cf 100644
--- a/google/cloud/videointelligence_v1p3beta1/__init__.py
+++ b/google/cloud/videointelligence_v1p3beta1/__init__.py
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.videointelligence import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.streaming_video_intelligence_service import (
StreamingVideoIntelligenceServiceAsyncClient,
diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py
index be75e430..7d97fc8f 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py
@@ -22,6 +22,8 @@
Awaitable,
Dict,
Mapping,
+ MutableMapping,
+ MutableSequence,
Optional,
Sequence,
Tuple,
@@ -183,11 +185,11 @@ def transport(self) -> StreamingVideoIntelligenceServiceTransport:
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, StreamingVideoIntelligenceServiceTransport
] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the streaming video intelligence service client.
@@ -231,12 +233,12 @@ def __init__(
def streaming_annotate_video(
self,
- requests: AsyncIterator[
- video_intelligence.StreamingAnnotateVideoRequest
+ requests: Optional[
+ AsyncIterator[video_intelligence.StreamingAnnotateVideoRequest]
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[video_intelligence.StreamingAnnotateVideoResponse]]:
r"""Performs video annotation with bidirectional
diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py
index db9c470f..58516f37 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py
@@ -21,11 +21,14 @@
Iterable,
Iterator,
Mapping,
+ MutableMapping,
+ MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
+ cast,
)
from google.api_core import client_options as client_options_lib
@@ -76,7 +79,7 @@ class StreamingVideoIntelligenceServiceClientMeta(type):
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[StreamingVideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
@@ -331,8 +334,10 @@ def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, StreamingVideoIntelligenceServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[
+ Union[str, StreamingVideoIntelligenceServiceTransport]
+ ] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the streaming video intelligence service client.
@@ -346,7 +351,7 @@ def __init__(
transport (Union[str, StreamingVideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -376,6 +381,7 @@ def __init__(
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
@@ -428,10 +434,12 @@ def __init__(
def streaming_annotate_video(
self,
- requests: Iterator[video_intelligence.StreamingAnnotateVideoRequest] = None,
+ requests: Optional[
+ Iterator[video_intelligence.StreamingAnnotateVideoRequest]
+ ] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[video_intelligence.StreamingAnnotateVideoResponse]:
r"""Performs video annotation with bidirectional
diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py
index e5c59903..e1e2ad9e 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py
@@ -48,7 +48,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py
index 54e62e65..83b10f76 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py
@@ -48,14 +48,14 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[grpc.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -182,8 +182,8 @@ def __init__(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py
index e2db4be7..d1b33d38 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py
@@ -50,7 +50,7 @@ class StreamingVideoIntelligenceServiceGrpcAsyncIOTransport(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -93,15 +93,15 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[aio.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py
index 62c71a7a..29a23bd4 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py
@@ -16,7 +16,17 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
@@ -164,9 +174,9 @@ def transport(self) -> VideoIntelligenceServiceTransport:
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -210,12 +220,12 @@ def __init__(
async def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -241,7 +251,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p3beta1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
@@ -255,7 +265,7 @@ async def sample_annotate_video():
print(response)
Args:
- request (Union[google.cloud.videointelligence_v1p3beta1.types.AnnotateVideoRequest, dict]):
+ request (Optional[Union[google.cloud.videointelligence_v1p3beta1.types.AnnotateVideoRequest, dict]]):
The request object. Video annotation request.
input_uri (:class:`str`):
Input video location. Currently, only `Cloud
@@ -276,7 +286,7 @@ async def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (:class:`Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]`):
+ features (:class:`MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Feature]`):
Required. Requested video annotation
features.
diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py
index fc7a7f90..0237a3cd 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py
@@ -16,7 +16,18 @@
from collections import OrderedDict
import os
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
@@ -60,7 +71,7 @@ class VideoIntelligenceServiceClientMeta(type):
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
@@ -313,8 +324,8 @@ def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[Union[str, VideoIntelligenceServiceTransport]] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
@@ -328,7 +339,7 @@ def __init__(
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -358,6 +369,7 @@ def __init__(
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
@@ -410,12 +422,12 @@ def __init__(
def annotate_video(
self,
- request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
+ request: Optional[Union[video_intelligence.AnnotateVideoRequest, dict]] = None,
*,
- input_uri: str = None,
- features: Sequence[video_intelligence.Feature] = None,
+ input_uri: Optional[str] = None,
+ features: Optional[MutableSequence[video_intelligence.Feature]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
+ timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
@@ -441,7 +453,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p3beta1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
@@ -476,7 +488,7 @@ def sample_annotate_video():
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- features (Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Feature]):
Required. Requested video annotation
features.
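
The v1p3beta1 sample likewise now requests ``PERSON_DETECTION`` as a list. A hedged sketch pairing it with the ``PersonDetectionConfig`` message annotated later in this diff (the URI and timeout are placeholders):

from google.cloud import videointelligence_v1p3beta1 as vi

client = vi.VideoIntelligenceServiceClient()
operation = client.annotate_video(
    request=vi.AnnotateVideoRequest(
        input_uri="gs://my-bucket/people.mp4",  # placeholder URI
        features=[vi.Feature.PERSON_DETECTION],
        video_context=vi.VideoContext(
            person_detection_config=vi.PersonDetectionConfig(
                include_bounding_boxes=True,
            )
        ),
    )
)
result = operation.result(timeout=300)
for annotation in result.annotation_results[0].person_detection_annotations:
    for track in annotation.tracks:
        print(track.segment)
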
diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py
index cea93f58..ca0cad53 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py
@@ -49,7 +49,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py
index 3767548d..292f68d7 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py
@@ -47,14 +47,14 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[grpc.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -182,8 +182,8 @@ def __init__(
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py
index 325f8386..75f495df 100644
--- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py
+++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py
@@ -49,7 +49,7 @@ class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTrans
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -92,15 +92,15 @@ def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[aio.Channel] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
diff --git a/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py b/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py
index f943f7ae..e2dec8d6 100644
--- a/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py
+++ b/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from typing import MutableMapping, MutableSequence
+
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
@@ -150,7 +152,7 @@ class AnnotateVideoRequest(proto.Message):
The video data bytes. If unset, the input video(s) should be
specified via the ``input_uri``. If set, ``input_uri`` must
be unset.
- features (Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]):
+ features (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Feature]):
Required. Requested video annotation
features.
video_context (google.cloud.videointelligence_v1p3beta1.types.VideoContext):
@@ -172,29 +174,29 @@ class AnnotateVideoRequest(proto.Message):
the region will be determined based on video file location.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- input_content = proto.Field(
+ input_content: bytes = proto.Field(
proto.BYTES,
number=6,
)
- features = proto.RepeatedField(
+ features: MutableSequence["Feature"] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="Feature",
)
- video_context = proto.Field(
+ video_context: "VideoContext" = proto.Field(
proto.MESSAGE,
number=3,
message="VideoContext",
)
- output_uri = proto.Field(
+ output_uri: str = proto.Field(
proto.STRING,
number=4,
)
- location_id = proto.Field(
+ location_id: str = proto.Field(
proto.STRING,
number=5,
)
@@ -204,7 +206,7 @@ class VideoContext(proto.Message):
r"""Video context and/or feature-specific parameters.
Attributes:
- segments (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
Video segments to annotate. The segments may
overlap and are not required to be contiguous or
span the whole video. If unspecified, each video
@@ -227,47 +229,47 @@ class VideoContext(proto.Message):
Config for OBJECT_TRACKING.
"""
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- label_detection_config = proto.Field(
+ label_detection_config: "LabelDetectionConfig" = proto.Field(
proto.MESSAGE,
number=2,
message="LabelDetectionConfig",
)
- shot_change_detection_config = proto.Field(
+ shot_change_detection_config: "ShotChangeDetectionConfig" = proto.Field(
proto.MESSAGE,
number=3,
message="ShotChangeDetectionConfig",
)
- explicit_content_detection_config = proto.Field(
+ explicit_content_detection_config: "ExplicitContentDetectionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="ExplicitContentDetectionConfig",
)
- face_detection_config = proto.Field(
+ face_detection_config: "FaceDetectionConfig" = proto.Field(
proto.MESSAGE,
number=5,
message="FaceDetectionConfig",
)
- speech_transcription_config = proto.Field(
+ speech_transcription_config: "SpeechTranscriptionConfig" = proto.Field(
proto.MESSAGE,
number=6,
message="SpeechTranscriptionConfig",
)
- text_detection_config = proto.Field(
+ text_detection_config: "TextDetectionConfig" = proto.Field(
proto.MESSAGE,
number=8,
message="TextDetectionConfig",
)
- person_detection_config = proto.Field(
+ person_detection_config: "PersonDetectionConfig" = proto.Field(
proto.MESSAGE,
number=11,
message="PersonDetectionConfig",
)
- object_tracking_config = proto.Field(
+ object_tracking_config: "ObjectTrackingConfig" = proto.Field(
proto.MESSAGE,
number=13,
message="ObjectTrackingConfig",
@@ -309,24 +311,24 @@ class LabelDetectionConfig(proto.Message):
when we release a new model.
"""
- label_detection_mode = proto.Field(
+ label_detection_mode: "LabelDetectionMode" = proto.Field(
proto.ENUM,
number=1,
enum="LabelDetectionMode",
)
- stationary_camera = proto.Field(
+ stationary_camera: bool = proto.Field(
proto.BOOL,
number=2,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=3,
)
- frame_confidence_threshold = proto.Field(
+ frame_confidence_threshold: float = proto.Field(
proto.FLOAT,
number=4,
)
- video_confidence_threshold = proto.Field(
+ video_confidence_threshold: float = proto.Field(
proto.FLOAT,
number=5,
)
@@ -342,7 +344,7 @@ class ShotChangeDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -358,7 +360,7 @@ class ObjectTrackingConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -374,7 +376,7 @@ class ExplicitContentDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
@@ -397,15 +399,15 @@ class FaceDetectionConfig(proto.Message):
'include_bounding_boxes' is set to false.
"""
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=1,
)
- include_bounding_boxes = proto.Field(
+ include_bounding_boxes: bool = proto.Field(
proto.BOOL,
number=2,
)
- include_attributes = proto.Field(
+ include_attributes: bool = proto.Field(
proto.BOOL,
number=5,
)
@@ -428,15 +430,15 @@ class PersonDetectionConfig(proto.Message):
'include_bounding_boxes' is set to false.
"""
- include_bounding_boxes = proto.Field(
+ include_bounding_boxes: bool = proto.Field(
proto.BOOL,
number=1,
)
- include_pose_landmarks = proto.Field(
+ include_pose_landmarks: bool = proto.Field(
proto.BOOL,
number=2,
)
- include_attributes = proto.Field(
+ include_attributes: bool = proto.Field(
proto.BOOL,
number=3,
)
@@ -446,7 +448,7 @@ class TextDetectionConfig(proto.Message):
r"""Config for TEXT_DETECTION.
Attributes:
- language_hints (Sequence[str]):
+ language_hints (MutableSequence[str]):
A language hint can be specified if the
language to be detected is known a priori. It
can increase the accuracy of the detection.
@@ -461,11 +463,11 @@ class TextDetectionConfig(proto.Message):
if unset) and "builtin/latest".
"""
- language_hints = proto.RepeatedField(
+ language_hints: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
- model = proto.Field(
+ model: str = proto.Field(
proto.STRING,
number=2,
)
@@ -485,12 +487,12 @@ class VideoSegment(proto.Message):
(inclusive).
"""
- start_time_offset = proto.Field(
+ start_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time_offset = proto.Field(
+ end_time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -507,12 +509,12 @@ class LabelSegment(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -530,12 +532,12 @@ class LabelFrame(proto.Message):
Confidence that the label is accurate. Range: [0, 1].
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -555,15 +557,15 @@ class Entity(proto.Message):
Language code for ``description`` in BCP-47 format.
"""
- entity_id = proto.Field(
+ entity_id: str = proto.Field(
proto.STRING,
number=1,
)
- description = proto.Field(
+ description: str = proto.Field(
proto.STRING,
number=2,
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=3,
)
@@ -575,34 +577,34 @@ class LabelAnnotation(proto.Message):
Attributes:
entity (google.cloud.videointelligence_v1p3beta1.types.Entity):
Detected entity.
- category_entities (Sequence[google.cloud.videointelligence_v1p3beta1.types.Entity]):
+ category_entities (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Entity]):
Common categories for the detected entity. For example, when
the label is ``Terrier``, the category is likely ``dog``.
And in some cases there might be more than one category,
e.g., ``Terrier`` could also be a ``pet``.
- segments (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelSegment]):
All video segments where a label was
detected.
- frames (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelFrame]):
All video frames where a label was detected.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- category_entities = proto.RepeatedField(
+ category_entities: MutableSequence["Entity"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Entity",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["LabelSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelSegment",
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["LabelFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelFrame",
@@ -621,12 +623,12 @@ class ExplicitContentFrame(proto.Message):
Likelihood of the pornography content.
"""
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- pornography_likelihood = proto.Field(
+ pornography_likelihood: "Likelihood" = proto.Field(
proto.ENUM,
number=2,
enum="Likelihood",
@@ -639,12 +641,12 @@ class ExplicitContentAnnotation(proto.Message):
frame, no annotations are present for that frame.
Attributes:
- frames (Sequence[google.cloud.videointelligence_v1p3beta1.types.ExplicitContentFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.ExplicitContentFrame]):
All video frames where explicit content was
detected.
"""
- frames = proto.RepeatedField(
+ frames: MutableSequence["ExplicitContentFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="ExplicitContentFrame",
@@ -666,19 +668,19 @@ class NormalizedBoundingBox(proto.Message):
Bottom Y coordinate.
"""
- left = proto.Field(
+ left: float = proto.Field(
proto.FLOAT,
number=1,
)
- top = proto.Field(
+ top: float = proto.Field(
proto.FLOAT,
number=2,
)
- right = proto.Field(
+ right: float = proto.Field(
proto.FLOAT,
number=3,
)
- bottom = proto.Field(
+ bottom: float = proto.Field(
proto.FLOAT,
number=4,
)
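
Since the box coordinates are normalized to [0, 1], mapping them back to pixels needs the frame dimensions. A sketch (the helper name and dimensions are illustrative, not from this library):

```python
def to_pixels(box, width, height):
    # Convert a NormalizedBoundingBox to pixel coordinates.
    return (
        int(box.left * width),
        int(box.top * height),
        int(box.right * width),
        int(box.bottom * height),
    )
```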
@@ -696,29 +698,29 @@ class TimestampedObject(proto.Message):
Time-offset, relative to the beginning of the
video, corresponding to the video frame for this
object.
- attributes (Sequence[google.cloud.videointelligence_v1p3beta1.types.DetectedAttribute]):
+ attributes (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.DetectedAttribute]):
Optional. The attributes of the object in the
bounding box.
- landmarks (Sequence[google.cloud.videointelligence_v1p3beta1.types.DetectedLandmark]):
+ landmarks (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.DetectedLandmark]):
Optional. The detected landmarks.
"""
- normalized_bounding_box = proto.Field(
+ normalized_bounding_box: "NormalizedBoundingBox" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
)
- attributes = proto.RepeatedField(
+ attributes: MutableSequence["DetectedAttribute"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="DetectedAttribute",
)
- landmarks = proto.RepeatedField(
+ landmarks: MutableSequence["DetectedLandmark"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="DetectedLandmark",
@@ -731,32 +733,32 @@ class Track(proto.Message):
Attributes:
segment (google.cloud.videointelligence_v1p3beta1.types.VideoSegment):
Video segment of a track.
- timestamped_objects (Sequence[google.cloud.videointelligence_v1p3beta1.types.TimestampedObject]):
+ timestamped_objects (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.TimestampedObject]):
The object with timestamp and attributes per
frame in the track.
- attributes (Sequence[google.cloud.videointelligence_v1p3beta1.types.DetectedAttribute]):
+ attributes (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.DetectedAttribute]):
            Optional. Attributes at the track level.
confidence (float):
Optional. The confidence score of the tracked
object.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- timestamped_objects = proto.RepeatedField(
+ timestamped_objects: MutableSequence["TimestampedObject"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="TimestampedObject",
)
- attributes = proto.RepeatedField(
+ attributes: MutableSequence["DetectedAttribute"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="DetectedAttribute",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
@@ -779,15 +781,15 @@ class DetectedAttribute(proto.Message):
"black", "blonde", etc.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- value = proto.Field(
+ value: str = proto.Field(
proto.STRING,
number=3,
)
@@ -809,15 +811,15 @@ class Celebrity(proto.Message):
about the celebrity, if applicable.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- display_name = proto.Field(
+ display_name: str = proto.Field(
proto.STRING,
number=2,
)
- description = proto.Field(
+ description: str = proto.Field(
proto.STRING,
number=3,
)
@@ -829,7 +831,7 @@ class CelebrityTrack(proto.Message):
not have any matched celebrities.
Attributes:
- celebrities (Sequence[google.cloud.videointelligence_v1p3beta1.types.CelebrityTrack.RecognizedCelebrity]):
+ celebrities (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.CelebrityTrack.RecognizedCelebrity]):
Top N match of the celebrities for the face
in this track.
face_track (google.cloud.videointelligence_v1p3beta1.types.Track):
@@ -846,22 +848,22 @@ class RecognizedCelebrity(proto.Message):
Recognition confidence. Range [0, 1].
"""
- celebrity = proto.Field(
+ celebrity: "Celebrity" = proto.Field(
proto.MESSAGE,
number=1,
message="Celebrity",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- celebrities = proto.RepeatedField(
+ celebrities: MutableSequence[RecognizedCelebrity] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=RecognizedCelebrity,
)
- face_track = proto.Field(
+ face_track: "Track" = proto.Field(
proto.MESSAGE,
number=3,
message="Track",
@@ -872,13 +874,13 @@ class CelebrityRecognitionAnnotation(proto.Message):
r"""Celebrity recognition annotation per video.
Attributes:
- celebrity_tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.CelebrityTrack]):
+ celebrity_tracks (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.CelebrityTrack]):
The tracks detected from the input video,
including recognized celebrities and other
detected faces in the video.
"""
- celebrity_tracks = proto.RepeatedField(
+ celebrity_tracks: MutableSequence["CelebrityTrack"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="CelebrityTrack",
@@ -902,16 +904,16 @@ class DetectedLandmark(proto.Message):
The confidence score of the detected landmark. Range [0, 1].
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- point = proto.Field(
+ point: "NormalizedVertex" = proto.Field(
proto.MESSAGE,
number=2,
message="NormalizedVertex",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=3,
)
@@ -921,18 +923,18 @@ class FaceDetectionAnnotation(proto.Message):
r"""Face detection annotation.
Attributes:
- tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.Track]):
+ tracks (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Track]):
The face tracks with attributes.
thumbnail (bytes):
The thumbnail of a person's face.
"""
- tracks = proto.RepeatedField(
+ tracks: MutableSequence["Track"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="Track",
)
- thumbnail = proto.Field(
+ thumbnail: bytes = proto.Field(
proto.BYTES,
number=4,
)
@@ -942,11 +944,11 @@ class PersonDetectionAnnotation(proto.Message):
r"""Person detection annotation per video.
Attributes:
- tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.Track]):
+ tracks (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Track]):
The detected tracks of a person.
"""
- tracks = proto.RepeatedField(
+ tracks: MutableSequence["Track"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Track",
@@ -962,11 +964,11 @@ class VideoAnnotationResults(proto.Message):
            Storage <https://cloud.google.com/storage/>`__.
segment (google.cloud.videointelligence_v1p3beta1.types.VideoSegment):
Video segment on which the annotation is run.
- segment_label_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
+ segment_label_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
Topical label annotations on video level or
user-specified segment level. There is exactly
one element for each unique label.
- segment_presence_label_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
+ segment_presence_label_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
Presence label annotations on video level or user-specified
segment level. There is exactly one element for each unique
label. Compared to the existing topical
@@ -975,11 +977,11 @@ class VideoAnnotationResults(proto.Message):
and is made available only when the client sets
``LabelDetectionConfig.model`` to "builtin/latest" in the
request.
- shot_label_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
+ shot_label_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
Topical label annotations on shot level.
There is exactly one element for each unique
label.
- shot_presence_label_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
+ shot_presence_label_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
Presence label annotations on shot level. There is exactly
one element for each unique label. Compared to the existing
topical ``shot_label_annotations``, this field presents more
@@ -987,31 +989,31 @@ class VideoAnnotationResults(proto.Message):
and is made available only when the client sets
``LabelDetectionConfig.model`` to "builtin/latest" in the
request.
- frame_label_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
+ frame_label_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
Label annotations on frame level.
There is exactly one element for each unique
label.
- face_detection_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.FaceDetectionAnnotation]):
+ face_detection_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.FaceDetectionAnnotation]):
Face detection annotations.
- shot_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
+ shot_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
Shot annotations. Each shot is represented as
a video segment.
explicit_annotation (google.cloud.videointelligence_v1p3beta1.types.ExplicitContentAnnotation):
Explicit content annotation.
- speech_transcriptions (Sequence[google.cloud.videointelligence_v1p3beta1.types.SpeechTranscription]):
+ speech_transcriptions (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.SpeechTranscription]):
Speech transcription.
- text_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.TextAnnotation]):
+ text_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.TextAnnotation]):
            OCR text detection and tracking.
            Annotations for a list of detected text snippets.
            Each snippet has a list of frame information
            associated with it.
- object_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.ObjectTrackingAnnotation]):
+ object_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.ObjectTrackingAnnotation]):
            Annotations for a list of objects detected and
            tracked in the video.
- logo_recognition_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LogoRecognitionAnnotation]):
+ logo_recognition_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LogoRecognitionAnnotation]):
            Annotations for a list of logos detected,
            tracked and recognized in the video.
- person_detection_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.PersonDetectionAnnotation]):
+ person_detection_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.PersonDetectionAnnotation]):
Person detection annotations.
celebrity_recognition_annotations (google.cloud.videointelligence_v1p3beta1.types.CelebrityRecognitionAnnotation):
Celebrity recognition annotations.
@@ -1021,86 +1023,98 @@ class VideoAnnotationResults(proto.Message):
may fail.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=10,
message="VideoSegment",
)
- segment_label_annotations = proto.RepeatedField(
+ segment_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="LabelAnnotation",
)
- segment_presence_label_annotations = proto.RepeatedField(
+ segment_presence_label_annotations: MutableSequence[
+ "LabelAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=23,
message="LabelAnnotation",
)
- shot_label_annotations = proto.RepeatedField(
+ shot_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="LabelAnnotation",
)
- shot_presence_label_annotations = proto.RepeatedField(
+ shot_presence_label_annotations: MutableSequence[
+ "LabelAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=24,
message="LabelAnnotation",
)
- frame_label_annotations = proto.RepeatedField(
+ frame_label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="LabelAnnotation",
)
- face_detection_annotations = proto.RepeatedField(
+ face_detection_annotations: MutableSequence[
+ "FaceDetectionAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=13,
message="FaceDetectionAnnotation",
)
- shot_annotations = proto.RepeatedField(
+ shot_annotations: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VideoSegment",
)
- explicit_annotation = proto.Field(
+ explicit_annotation: "ExplicitContentAnnotation" = proto.Field(
proto.MESSAGE,
number=7,
message="ExplicitContentAnnotation",
)
- speech_transcriptions = proto.RepeatedField(
+ speech_transcriptions: MutableSequence["SpeechTranscription"] = proto.RepeatedField(
proto.MESSAGE,
number=11,
message="SpeechTranscription",
)
- text_annotations = proto.RepeatedField(
+ text_annotations: MutableSequence["TextAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=12,
message="TextAnnotation",
)
- object_annotations = proto.RepeatedField(
+ object_annotations: MutableSequence[
+ "ObjectTrackingAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=14,
message="ObjectTrackingAnnotation",
)
- logo_recognition_annotations = proto.RepeatedField(
+ logo_recognition_annotations: MutableSequence[
+ "LogoRecognitionAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=19,
message="LogoRecognitionAnnotation",
)
- person_detection_annotations = proto.RepeatedField(
+ person_detection_annotations: MutableSequence[
+ "PersonDetectionAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=20,
message="PersonDetectionAnnotation",
)
- celebrity_recognition_annotations = proto.Field(
+ celebrity_recognition_annotations: "CelebrityRecognitionAnnotation" = proto.Field(
proto.MESSAGE,
number=21,
message="CelebrityRecognitionAnnotation",
)
- error = proto.Field(
+ error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=9,
message=status_pb2.Status,
@@ -1113,12 +1127,12 @@ class AnnotateVideoResponse(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_results (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoAnnotationResults]):
+ annotation_results (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.VideoAnnotationResults]):
Annotation results for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_results = proto.RepeatedField(
+ annotation_results: MutableSequence["VideoAnnotationResults"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationResults",
@@ -1147,30 +1161,30 @@ class VideoAnnotationProgress(proto.Message):
the request contains more than one segment.
"""
- input_uri = proto.Field(
+ input_uri: str = proto.Field(
proto.STRING,
number=1,
)
- progress_percent = proto.Field(
+ progress_percent: int = proto.Field(
proto.INT32,
number=2,
)
- start_time = proto.Field(
+ start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- update_time = proto.Field(
+ update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
- feature = proto.Field(
+ feature: "Feature" = proto.Field(
proto.ENUM,
number=5,
enum="Feature",
)
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=6,
message="VideoSegment",
@@ -1183,12 +1197,14 @@ class AnnotateVideoProgress(proto.Message):
``google::longrunning::Operations`` service.
Attributes:
- annotation_progress (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoAnnotationProgress]):
+ annotation_progress (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.VideoAnnotationProgress]):
Progress metadata for all videos specified in
``AnnotateVideoRequest``.
"""
- annotation_progress = proto.RepeatedField(
+ annotation_progress: MutableSequence[
+ "VideoAnnotationProgress"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoAnnotationProgress",
@@ -1219,7 +1235,7 @@ class SpeechTranscriptionConfig(proto.Message):
character in each filtered word with asterisks, e.g. "f***".
If set to ``false`` or omitted, profanities won't be
filtered out.
- speech_contexts (Sequence[google.cloud.videointelligence_v1p3beta1.types.SpeechContext]):
+ speech_contexts (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.SpeechContext]):
Optional. A means to provide context to
assist the speech recognition.
enable_automatic_punctuation (bool):
@@ -1233,7 +1249,7 @@ class SpeechTranscriptionConfig(proto.Message):
complimentary to all users. In the future this
may be exclusively available as a premium
feature.".
- audio_tracks (Sequence[int]):
+ audio_tracks (MutableSequence[int]):
Optional. For file formats, such as MXF or
MKV, supporting multiple audio tracks, specify
up to two tracks. Default: track 0.
@@ -1257,40 +1273,40 @@ class SpeechTranscriptionConfig(proto.Message):
is ``false``.
"""
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=1,
)
- max_alternatives = proto.Field(
+ max_alternatives: int = proto.Field(
proto.INT32,
number=2,
)
- filter_profanity = proto.Field(
+ filter_profanity: bool = proto.Field(
proto.BOOL,
number=3,
)
- speech_contexts = proto.RepeatedField(
+ speech_contexts: MutableSequence["SpeechContext"] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="SpeechContext",
)
- enable_automatic_punctuation = proto.Field(
+ enable_automatic_punctuation: bool = proto.Field(
proto.BOOL,
number=5,
)
- audio_tracks = proto.RepeatedField(
+ audio_tracks: MutableSequence[int] = proto.RepeatedField(
proto.INT32,
number=6,
)
- enable_speaker_diarization = proto.Field(
+ enable_speaker_diarization: bool = proto.Field(
proto.BOOL,
number=7,
)
- diarization_speaker_count = proto.Field(
+ diarization_speaker_count: int = proto.Field(
proto.INT32,
number=8,
)
- enable_word_confidence = proto.Field(
+ enable_word_confidence: bool = proto.Field(
proto.BOOL,
number=9,
)
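
A hedged sketch of wiring this config into a request through `VideoContext` (field names per this module; the URI is a placeholder):

```python
from google.cloud import videointelligence_v1p3beta1 as vi

context = vi.VideoContext(
    speech_transcription_config=vi.SpeechTranscriptionConfig(
        language_code="en-US",
        enable_automatic_punctuation=True,
    )
)
request = vi.AnnotateVideoRequest(
    features=[vi.Feature.SPEECH_TRANSCRIPTION],
    input_uri="gs://your-bucket/your-video.mp4",  # placeholder
    video_context=context,
)
```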
@@ -1301,7 +1317,7 @@ class SpeechContext(proto.Message):
words and phrases in the results.
Attributes:
- phrases (Sequence[str]):
+ phrases (MutableSequence[str]):
Optional. A list of strings containing words and phrases
"hints" so that the speech recognition is more likely to
recognize them. This can be used to improve the accuracy for
@@ -1312,7 +1328,7 @@ class SpeechContext(proto.Message):
limits `__.
"""
- phrases = proto.RepeatedField(
+ phrases: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
@@ -1323,7 +1339,7 @@ class SpeechTranscription(proto.Message):
audio.
Attributes:
- alternatives (Sequence[google.cloud.videointelligence_v1p3beta1.types.SpeechRecognitionAlternative]):
+ alternatives (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.SpeechRecognitionAlternative]):
May contain one or more recognition hypotheses (up to the
maximum specified in ``max_alternatives``). These
alternatives are ordered in terms of accuracy, with the top
@@ -1337,12 +1353,12 @@ class SpeechTranscription(proto.Message):
spoken in the audio.
"""
- alternatives = proto.RepeatedField(
+ alternatives: MutableSequence["SpeechRecognitionAlternative"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="SpeechRecognitionAlternative",
)
- language_code = proto.Field(
+ language_code: str = proto.Field(
proto.STRING,
number=2,
)
@@ -1363,22 +1379,22 @@ class SpeechRecognitionAlternative(proto.Message):
accurate and users should not rely on it to be always
provided. The default of 0.0 is a sentinel value indicating
``confidence`` was not set.
- words (Sequence[google.cloud.videointelligence_v1p3beta1.types.WordInfo]):
+ words (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.WordInfo]):
Output only. A list of word-specific information for each
recognized word. Note: When ``enable_speaker_diarization``
is set to true, you will see all the words from the
beginning of the audio.
"""
- transcript = proto.Field(
+ transcript: str = proto.Field(
proto.STRING,
number=1,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- words = proto.RepeatedField(
+ words: MutableSequence["WordInfo"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="WordInfo",
@@ -1422,25 +1438,25 @@ class WordInfo(proto.Message):
set if speaker diarization is enabled.
"""
- start_time = proto.Field(
+ start_time: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
- end_time = proto.Field(
+ end_time: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
)
- word = proto.Field(
+ word: str = proto.Field(
proto.STRING,
number=3,
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
- speaker_tag = proto.Field(
+ speaker_tag: int = proto.Field(
proto.INT32,
number=5,
)
@@ -1458,11 +1474,11 @@ class NormalizedVertex(proto.Message):
Y coordinate.
"""
- x = proto.Field(
+ x: float = proto.Field(
proto.FLOAT,
number=1,
)
- y = proto.Field(
+ y: float = proto.Field(
proto.FLOAT,
number=2,
)
@@ -1483,11 +1499,11 @@ class NormalizedBoundingPoly(proto.Message):
calculations for location of the box.
Attributes:
- vertices (Sequence[google.cloud.videointelligence_v1p3beta1.types.NormalizedVertex]):
+ vertices (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.NormalizedVertex]):
Normalized vertices of the bounding polygon.
"""
- vertices = proto.RepeatedField(
+ vertices: MutableSequence["NormalizedVertex"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="NormalizedVertex",
@@ -1505,21 +1521,21 @@ class TextSegment(proto.Message):
Confidence for the track of detected text. It
is calculated as the highest over all frames
where OCR detected text appears.
- frames (Sequence[google.cloud.videointelligence_v1p3beta1.types.TextFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.TextFrame]):
Information related to the frames where OCR
detected text appears.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=2,
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["TextFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="TextFrame",
@@ -1539,12 +1555,12 @@ class TextFrame(proto.Message):
Timestamp of this frame.
"""
- rotated_bounding_box = proto.Field(
+ rotated_bounding_box: "NormalizedBoundingPoly" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingPoly",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -1559,16 +1575,16 @@ class TextAnnotation(proto.Message):
Attributes:
text (str):
The detected text.
- segments (Sequence[google.cloud.videointelligence_v1p3beta1.types.TextSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.TextSegment]):
All video segments where OCR detected text
appears.
"""
- text = proto.Field(
+ text: str = proto.Field(
proto.STRING,
number=1,
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["TextSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="TextSegment",
@@ -1588,12 +1604,12 @@ class ObjectTrackingFrame(proto.Message):
The timestamp of the frame in microseconds.
"""
- normalized_bounding_box = proto.Field(
+ normalized_bounding_box: "NormalizedBoundingBox" = proto.Field(
proto.MESSAGE,
number=1,
message="NormalizedBoundingBox",
)
- time_offset = proto.Field(
+ time_offset: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
@@ -1632,7 +1648,7 @@ class ObjectTrackingAnnotation(proto.Message):
confidence (float):
Object category's labeling confidence of this
track.
- frames (Sequence[google.cloud.videointelligence_v1p3beta1.types.ObjectTrackingFrame]):
+ frames (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.ObjectTrackingFrame]):
Information corresponding to all frames where
this object track appears. Non-streaming batch
mode: it may be one or multiple
@@ -1641,27 +1657,27 @@ class ObjectTrackingAnnotation(proto.Message):
ObjectTrackingFrame message in frames.
"""
- segment = proto.Field(
+ segment: "VideoSegment" = proto.Field(
proto.MESSAGE,
number=3,
oneof="track_info",
message="VideoSegment",
)
- track_id = proto.Field(
+ track_id: int = proto.Field(
proto.INT64,
number=5,
oneof="track_info",
)
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- confidence = proto.Field(
+ confidence: float = proto.Field(
proto.FLOAT,
number=4,
)
- frames = proto.RepeatedField(
+ frames: MutableSequence["ObjectTrackingFrame"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ObjectTrackingFrame",
@@ -1677,28 +1693,28 @@ class LogoRecognitionAnnotation(proto.Message):
Entity category information to specify the
logo class that all the logo tracks within this
LogoRecognitionAnnotation are recognized as.
- tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.Track]):
+ tracks (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Track]):
All logo tracks where the recognized logo
appears. Each track corresponds to one logo
instance appearing in consecutive frames.
- segments (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
+ segments (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
All video segments where the recognized logo
appears. There might be multiple instances of
the same logo class appearing in one
VideoSegment.
"""
- entity = proto.Field(
+ entity: "Entity" = proto.Field(
proto.MESSAGE,
number=1,
message="Entity",
)
- tracks = proto.RepeatedField(
+ tracks: MutableSequence["Track"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="Track",
)
- segments = proto.RepeatedField(
+ segments: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="VideoSegment",
@@ -1740,13 +1756,13 @@ class StreamingAnnotateVideoRequest(proto.Message):
This field is a member of `oneof`_ ``streaming_request``.
"""
- video_config = proto.Field(
+ video_config: "StreamingVideoConfig" = proto.Field(
proto.MESSAGE,
number=1,
oneof="streaming_request",
message="StreamingVideoConfig",
)
- input_content = proto.Field(
+ input_content: bytes = proto.Field(
proto.BYTES,
number=2,
oneof="streaming_request",
@@ -1800,54 +1816,58 @@ class StreamingVideoConfig(proto.Message):
is disabled.
"""
- shot_change_detection_config = proto.Field(
+ shot_change_detection_config: "StreamingShotChangeDetectionConfig" = proto.Field(
proto.MESSAGE,
number=2,
oneof="streaming_config",
message="StreamingShotChangeDetectionConfig",
)
- label_detection_config = proto.Field(
+ label_detection_config: "StreamingLabelDetectionConfig" = proto.Field(
proto.MESSAGE,
number=3,
oneof="streaming_config",
message="StreamingLabelDetectionConfig",
)
- explicit_content_detection_config = proto.Field(
- proto.MESSAGE,
- number=4,
- oneof="streaming_config",
- message="StreamingExplicitContentDetectionConfig",
+ explicit_content_detection_config: "StreamingExplicitContentDetectionConfig" = (
+ proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="streaming_config",
+ message="StreamingExplicitContentDetectionConfig",
+ )
)
- object_tracking_config = proto.Field(
+ object_tracking_config: "StreamingObjectTrackingConfig" = proto.Field(
proto.MESSAGE,
number=5,
oneof="streaming_config",
message="StreamingObjectTrackingConfig",
)
- automl_action_recognition_config = proto.Field(
- proto.MESSAGE,
- number=23,
- oneof="streaming_config",
- message="StreamingAutomlActionRecognitionConfig",
+ automl_action_recognition_config: "StreamingAutomlActionRecognitionConfig" = (
+ proto.Field(
+ proto.MESSAGE,
+ number=23,
+ oneof="streaming_config",
+ message="StreamingAutomlActionRecognitionConfig",
+ )
)
- automl_classification_config = proto.Field(
+ automl_classification_config: "StreamingAutomlClassificationConfig" = proto.Field(
proto.MESSAGE,
number=21,
oneof="streaming_config",
message="StreamingAutomlClassificationConfig",
)
- automl_object_tracking_config = proto.Field(
+ automl_object_tracking_config: "StreamingAutomlObjectTrackingConfig" = proto.Field(
proto.MESSAGE,
number=22,
oneof="streaming_config",
message="StreamingAutomlObjectTrackingConfig",
)
- feature = proto.Field(
+ feature: "StreamingFeature" = proto.Field(
proto.ENUM,
number=1,
enum="StreamingFeature",
)
- storage_config = proto.Field(
+ storage_config: "StreamingStorageConfig" = proto.Field(
proto.MESSAGE,
number=30,
message="StreamingStorageConfig",
@@ -1873,17 +1893,17 @@ class StreamingAnnotateVideoResponse(proto.Message):
followed by '/cloud_project_number-session_id'.
"""
- error = proto.Field(
+ error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
- annotation_results = proto.Field(
+ annotation_results: "StreamingVideoAnnotationResults" = proto.Field(
proto.MESSAGE,
number=2,
message="StreamingVideoAnnotationResults",
)
- annotation_results_uri = proto.Field(
+ annotation_results_uri: str = proto.Field(
proto.STRING,
number=3,
)
@@ -1894,33 +1914,35 @@ class StreamingVideoAnnotationResults(proto.Message):
the video that is currently being processed.
Attributes:
- shot_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
+ shot_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]):
Shot annotation results. Each shot is
represented as a video segment.
- label_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
+ label_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.LabelAnnotation]):
Label annotation results.
explicit_annotation (google.cloud.videointelligence_v1p3beta1.types.ExplicitContentAnnotation):
Explicit content annotation results.
- object_annotations (Sequence[google.cloud.videointelligence_v1p3beta1.types.ObjectTrackingAnnotation]):
+ object_annotations (MutableSequence[google.cloud.videointelligence_v1p3beta1.types.ObjectTrackingAnnotation]):
Object tracking results.
"""
- shot_annotations = proto.RepeatedField(
+ shot_annotations: MutableSequence["VideoSegment"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VideoSegment",
)
- label_annotations = proto.RepeatedField(
+ label_annotations: MutableSequence["LabelAnnotation"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="LabelAnnotation",
)
- explicit_annotation = proto.Field(
+ explicit_annotation: "ExplicitContentAnnotation" = proto.Field(
proto.MESSAGE,
number=3,
message="ExplicitContentAnnotation",
)
- object_annotations = proto.RepeatedField(
+ object_annotations: MutableSequence[
+ "ObjectTrackingAnnotation"
+ ] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="ObjectTrackingAnnotation",
@@ -1942,7 +1964,7 @@ class StreamingLabelDetectionConfig(proto.Message):
moving objects. Default: false.
"""
- stationary_camera = proto.Field(
+ stationary_camera: bool = proto.Field(
proto.BOOL,
number=1,
)
@@ -1965,7 +1987,7 @@ class StreamingAutomlActionRecognitionConfig(proto.Message):
``projects/{project_id}/locations/{location_id}/models/{model_id}``
"""
- model_name = proto.Field(
+ model_name: str = proto.Field(
proto.STRING,
number=1,
)
@@ -1980,7 +2002,7 @@ class StreamingAutomlClassificationConfig(proto.Message):
``projects/{project_number}/locations/{location_id}/models/{model_id}``
"""
- model_name = proto.Field(
+ model_name: str = proto.Field(
proto.STRING,
number=1,
)
@@ -1995,7 +2017,7 @@ class StreamingAutomlObjectTrackingConfig(proto.Message):
``projects/{project_id}/locations/{location_id}/models/{model_id}``
"""
- model_name = proto.Field(
+ model_name: str = proto.Field(
proto.STRING,
number=1,
)
@@ -2023,11 +2045,11 @@ class StreamingStorageConfig(proto.Message):
Storage write failure.
"""
- enable_storage_annotation_result = proto.Field(
+ enable_storage_annotation_result: bool = proto.Field(
proto.BOOL,
number=1,
)
- annotation_result_storage_directory = proto.Field(
+ annotation_result_storage_directory: str = proto.Field(
proto.STRING,
number=3,
)
diff --git a/owlbot.py b/owlbot.py
new file mode 100644
index 00000000..ce738f01
--- /dev/null
+++ b/owlbot.py
@@ -0,0 +1,56 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from pathlib import Path
+import shutil
+
+import synthtool as s
+import synthtool.gcp as gcp
+from synthtool.languages import python
+
+# ----------------------------------------------------------------------------
+# Copy the generated client from the owl-bot staging directory
+# ----------------------------------------------------------------------------
+
+clean_up_generated_samples = True
+
+# Load the default version defined in .repo-metadata.json.
+default_version = json.load(open(".repo-metadata.json", "rt")).get(
+ "default_version"
+)
+
+for library in s.get_staging_dirs(default_version):
+ if clean_up_generated_samples:
+ shutil.rmtree("samples/generated_samples", ignore_errors=True)
+ clean_up_generated_samples = False
+ s.move([library], excludes=["**/gapic_version.py"])
+s.remove_staging_dirs()
+
+# ----------------------------------------------------------------------------
+# Add templated files
+# ----------------------------------------------------------------------------
+
+templated_files = gcp.CommonTemplates().py_library(
+ cov_level=100,
+ microgenerator=True,
+ versions=gcp.common.detect_versions(path="./google", default_first=True),
+)
+s.move(templated_files, excludes=[".coveragerc", ".github/release-please.yml"])
+
+python.py_samples(skip_readmes=True)
+
+# run format session for all directories which have a noxfile
+for noxfile in Path(".").glob("**/noxfile.py"):
+ s.shell.run(["nox", "-s", "format"], cwd=noxfile.parent, hide_output=False)
diff --git a/release-please-config.json b/release-please-config.json
new file mode 100644
index 00000000..8d3672fd
--- /dev/null
+++ b/release-please-config.json
@@ -0,0 +1,43 @@
+{
+ "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
+ "packages": {
+ ".": {
+ "release-type": "python",
+ "extra-files": [
+ "google/cloud/videointelligence/gapic_version.py",
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p2beta1.json",
+ "jsonpath": "$.clientLibrary.version"
+ },
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1beta2.json",
+ "jsonpath": "$.clientLibrary.version"
+ },
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1.json",
+ "jsonpath": "$.clientLibrary.version"
+ },
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p3beta1.json",
+ "jsonpath": "$.clientLibrary.version"
+ },
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p1beta1.json",
+ "jsonpath": "$.clientLibrary.version"
+ }
+ ]
+ }
+ },
+ "release-type": "python",
+ "plugins": [
+ {
+ "type": "sentence-case"
+ }
+ ],
+ "initial-version": "0.1.0"
+}
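
For reference, each `extra-files` entry tells release-please where to rewrite the version string outside the manifest. `gapic_version.py` is essentially a one-line module; a sketch (the generated template may carry additional license headers):

```python
# google/cloud/videointelligence/gapic_version.py (sketch)
__version__ = "2.8.3"  # rewritten by release-please on each release
```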
diff --git a/samples/generated_samples/snippet_metadata_videointelligence_v1.json b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1.json
similarity index 95%
rename from samples/generated_samples/snippet_metadata_videointelligence_v1.json
rename to samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1.json
index 2723b5f7..1f03d3b6 100644
--- a/samples/generated_samples/snippet_metadata_videointelligence_v1.json
+++ b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1.json
@@ -7,7 +7,8 @@
}
],
"language": "PYTHON",
- "name": "google-cloud-videointelligence"
+ "name": "google-cloud-videointelligence",
+ "version": "0.1.0"
},
"snippets": [
{
@@ -38,7 +39,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1.types.Feature]"
},
{
"name": "retry",
@@ -122,7 +123,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1.types.Feature]"
},
{
"name": "retry",
diff --git a/samples/generated_samples/snippet_metadata_videointelligence_v1beta2.json b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1beta2.json
similarity index 95%
rename from samples/generated_samples/snippet_metadata_videointelligence_v1beta2.json
rename to samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1beta2.json
index 8aa50236..27c0821c 100644
--- a/samples/generated_samples/snippet_metadata_videointelligence_v1beta2.json
+++ b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1beta2.json
@@ -7,7 +7,8 @@
}
],
"language": "PYTHON",
- "name": "google-cloud-videointelligence"
+ "name": "google-cloud-videointelligence",
+ "version": "0.1.0"
},
"snippets": [
{
@@ -38,7 +39,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1beta2.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1beta2.types.Feature]"
},
{
"name": "retry",
@@ -122,7 +123,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1beta2.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1beta2.types.Feature]"
},
{
"name": "retry",
diff --git a/samples/generated_samples/snippet_metadata_videointelligence_v1p1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p1beta1.json
similarity index 95%
rename from samples/generated_samples/snippet_metadata_videointelligence_v1p1beta1.json
rename to samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p1beta1.json
index c7cc2330..91bdfe52 100644
--- a/samples/generated_samples/snippet_metadata_videointelligence_v1p1beta1.json
+++ b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p1beta1.json
@@ -7,7 +7,8 @@
}
],
"language": "PYTHON",
- "name": "google-cloud-videointelligence"
+ "name": "google-cloud-videointelligence",
+ "version": "0.1.0"
},
"snippets": [
{
@@ -38,7 +39,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1p1beta1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1p1beta1.types.Feature]"
},
{
"name": "retry",
@@ -122,7 +123,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1p1beta1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1p1beta1.types.Feature]"
},
{
"name": "retry",
diff --git a/samples/generated_samples/snippet_metadata_videointelligence_v1p2beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p2beta1.json
similarity index 95%
rename from samples/generated_samples/snippet_metadata_videointelligence_v1p2beta1.json
rename to samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p2beta1.json
index 2312b63e..ac1ac90e 100644
--- a/samples/generated_samples/snippet_metadata_videointelligence_v1p2beta1.json
+++ b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p2beta1.json
@@ -7,7 +7,8 @@
}
],
"language": "PYTHON",
- "name": "google-cloud-videointelligence"
+ "name": "google-cloud-videointelligence",
+ "version": "0.1.0"
},
"snippets": [
{
@@ -38,7 +39,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1p2beta1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1p2beta1.types.Feature]"
},
{
"name": "retry",
@@ -122,7 +123,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1p2beta1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1p2beta1.types.Feature]"
},
{
"name": "retry",
diff --git a/samples/generated_samples/snippet_metadata_videointelligence_v1p3beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p3beta1.json
similarity index 97%
rename from samples/generated_samples/snippet_metadata_videointelligence_v1p3beta1.json
rename to samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p3beta1.json
index 760dbf45..91041a39 100644
--- a/samples/generated_samples/snippet_metadata_videointelligence_v1p3beta1.json
+++ b/samples/generated_samples/snippet_metadata_google.cloud.videointelligence.v1p3beta1.json
@@ -7,7 +7,8 @@
}
],
"language": "PYTHON",
- "name": "google-cloud-videointelligence"
+ "name": "google-cloud-videointelligence",
+ "version": "0.1.0"
},
"snippets": [
{
@@ -191,7 +192,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Feature]"
},
{
"name": "retry",
@@ -275,7 +276,7 @@
},
{
"name": "features",
- "type": "Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]"
+ "type": "MutableSequence[google.cloud.videointelligence_v1p3beta1.types.Feature]"
},
{
"name": "retry",
diff --git a/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_async.py b/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_async.py
index 9b936c08..b1f94ee8 100644
--- a/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_async.py
+++ b/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_async.py
@@ -40,7 +40,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
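
This same one-line change repeats in each versioned sample below: `features` is a repeated enum field, so it takes a sequence; plain strings are coerced, and enum members are equivalent. A hedged equivalent of the updated line:

```python
from google.cloud import videointelligence_v1

request = videointelligence_v1.AnnotateVideoRequest(
    features=[videointelligence_v1.Feature.PERSON_DETECTION],
)
```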
diff --git a/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_sync.py b/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_sync.py
index 0ce0b8bf..f55c9dd8 100644
--- a/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_sync.py
+++ b/samples/generated_samples/videointelligence_v1_generated_video_intelligence_service_annotate_video_sync.py
@@ -40,7 +40,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_async.py b/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_async.py
index 55e0238f..9d556dc4 100644
--- a/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_async.py
+++ b/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_async.py
@@ -40,7 +40,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1beta2.AnnotateVideoRequest(
- features="FACE_DETECTION",
+ features=['FACE_DETECTION'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_sync.py b/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_sync.py
index 12bdf491..c7ac492b 100644
--- a/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_sync.py
+++ b/samples/generated_samples/videointelligence_v1beta2_generated_video_intelligence_service_annotate_video_sync.py
@@ -40,7 +40,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1beta2.AnnotateVideoRequest(
- features="FACE_DETECTION",
+ features=['FACE_DETECTION'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_async.py b/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_async.py
index b24dbd80..393a87b6 100644
--- a/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_async.py
+++ b/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_async.py
@@ -40,7 +40,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p1beta1.AnnotateVideoRequest(
- features="SPEECH_TRANSCRIPTION",
+ features=['SPEECH_TRANSCRIPTION'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_sync.py b/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_sync.py
index 6ce2b3c0..788d49ca 100644
--- a/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_sync.py
+++ b/samples/generated_samples/videointelligence_v1p1beta1_generated_video_intelligence_service_annotate_video_sync.py
@@ -40,7 +40,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p1beta1.AnnotateVideoRequest(
- features="SPEECH_TRANSCRIPTION",
+ features=['SPEECH_TRANSCRIPTION'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_async.py b/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_async.py
index f95bc27a..f1b34f6b 100644
--- a/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_async.py
+++ b/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_async.py
@@ -40,7 +40,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p2beta1.AnnotateVideoRequest(
- features="OBJECT_TRACKING",
+ features=['OBJECT_TRACKING'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_sync.py b/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_sync.py
index 39e6c69c..811b15b7 100644
--- a/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_sync.py
+++ b/samples/generated_samples/videointelligence_v1p2beta1_generated_video_intelligence_service_annotate_video_sync.py
@@ -40,7 +40,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p2beta1.AnnotateVideoRequest(
- features="OBJECT_TRACKING",
+ features=['OBJECT_TRACKING'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_async.py b/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_async.py
index f5d0229f..b418c67c 100644
--- a/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_async.py
+++ b/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_async.py
@@ -40,7 +40,7 @@ async def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p3beta1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
diff --git a/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_sync.py b/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_sync.py
index f65e65bb..016c1a8e 100644
--- a/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_sync.py
+++ b/samples/generated_samples/videointelligence_v1p3beta1_generated_video_intelligence_service_annotate_video_sync.py
@@ -40,7 +40,7 @@ def sample_annotate_video():
# Initialize request argument(s)
request = videointelligence_v1p3beta1.AnnotateVideoRequest(
- features="PERSON_DETECTION",
+ features=['PERSON_DETECTION'],
)
# Make the request
diff --git a/setup.py b/setup.py
index 03efde3e..bd994d2c 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
-# Copyright 2018 Google LLC
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,32 +12,37 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+#
import io
import os
-import setuptools
+import setuptools # type: ignore
-# Package metadata.
+package_root = os.path.abspath(os.path.dirname(__file__))
name = "google-cloud-videointelligence"
-description = "Google Cloud Video Intelligence API client library"
-version = "2.8.3"
-# Should be one of:
-# 'Development Status :: 3 - Alpha'
-# 'Development Status :: 4 - Beta'
-# 'Development Status :: 5 - Production/Stable'
-release_status = "Development Status :: 5 - Production/Stable"
+
+
+description = "Google Cloud Videointelligence API client library"
+
+version = {}
+with open(
+ os.path.join(package_root, "google/cloud/videointelligence/gapic_version.py")
+) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+if version[0] == "0":
+ release_status = "Development Status :: 4 - Beta"
+else:
+ release_status = "Development Status :: 5 - Production/Stable"
+
dependencies = [
- "google-api-core[grpc] >= 1.32.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*",
+ "google-api-core[grpc] >= 1.33.2, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*",
"proto-plus >= 1.22.0, <2.0.0dev",
"protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
]
-extras = {"libcst": "libcst >= 0.2.5"}
-scripts = ["scripts/fixup_keywords.py"]
-
-
-# Setup boilerplate below this line.
+url = "https://github.com/googleapis/python-videointelligence"
package_root = os.path.abspath(os.path.dirname(__file__))
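
Note on the block above: reading `__version__` with `exec()` instead of importing `google.cloud.videointelligence` keeps `setup.py` usable before the package's dependencies are installed, and the leading-`"0"` check maps pre-1.0 versions to the Beta trove classifier.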
@@ -44,20 +50,16 @@
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
-# Only include packages under the 'google' namespace. Do not include tests,
-# benchmarks, etc.
packages = [
package
for package in setuptools.PEP420PackageFinder.find()
if package.startswith("google")
]
-# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
-
setuptools.setup(
name=name,
version=version,
@@ -66,7 +68,7 @@
author="Google LLC",
author_email="googleapis-packages@google.com",
license="Apache 2.0",
- url="https://github.com/googleapis/python-videointelligence",
+ url=url,
classifiers=[
release_status,
"Intended Audience :: Developers",
@@ -82,11 +84,9 @@
],
platforms="Posix; MacOS X; Windows",
packages=packages,
+ python_requires=">=3.7",
namespace_packages=namespaces,
install_requires=dependencies,
- extras_require=extras,
- python_requires=">=3.7",
- scripts=scripts,
include_package_data=True,
zip_safe=False,
)
diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.10.txt
+++ b/testing/constraints-3.10.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.11.txt
+++ b/testing/constraints-3.11.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt
index 28b41909..6f3158cc 100644
--- a/testing/constraints-3.7.txt
+++ b/testing/constraints-3.7.txt
@@ -1,11 +1,9 @@
# This constraints file is used to check that lower bounds
# are correct in setup.py
-# List *all* library dependencies and extras in this file.
+# List all library dependencies and extras in this file.
# Pin the version to the lower bound.
-#
-# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
-# Then this file should have foo==1.14.0
-google-api-core==1.32.0
+# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev",
+# then this file should have google-cloud-foo==1.14.0
+google-api-core==1.33.2
proto-plus==1.22.0
-libcst==0.2.5
protobuf==3.19.5
diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.8.txt
+++ b/testing/constraints-3.8.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.9.txt
+++ b/testing/constraints-3.9.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf