From 25d6fb7d165329d164b387dad70634925b185f42 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 27 May 2016 15:32:59 -0400 Subject: [PATCH 001/103] Convert dataset usage examples to tested snippets. --- docs/bigquery-usage.rst | 82 +++++----------- docs/bigquery_snippets.py | 198 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 220 insertions(+), 60 deletions(-) create mode 100644 docs/bigquery_snippets.py diff --git a/docs/bigquery-usage.rst b/docs/bigquery-usage.rst index edab2e1510d4..3e6a2e88a0fb 100644 --- a/docs/bigquery-usage.rst +++ b/docs/bigquery-usage.rst @@ -25,24 +25,6 @@ Authentication / Configuration >>> from gcloud import bigquery >>> client = bigquery.Client() -- Override the credentials inferred from the environment by passing explicit - ``credentials`` to one of the alternative ``classmethod`` factories, - :meth:`gcloud.bigquery.client.Client.from_service_account_json`: - - .. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client.from_service_account_json('/path/to/creds.json') - - or :meth:`gcloud.bigquery.client.Client.from_service_account_p12`: - - .. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client.from_service_account_p12( - ... '/path/to/creds.p12', 'jrandom@example.com') - - Projects -------- @@ -83,54 +65,37 @@ policies to tables as they are created: Dataset operations ~~~~~~~~~~~~~~~~~~ -Create a new dataset for the client's project: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.create() # API request - -Check for the existence of a dataset: +List datasets for the client's project: -.. doctest:: +.. literalinclude:: bigquery_snippets.py + :start-after: [START client_list_datasets] + :end-before: [END client_list_datasets] - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.exists() # API request - True +Create a new dataset for the client's project: -List datasets for the client's project: +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_create] + :end-before: [END dataset_create] -.. doctest:: +Check for the existence of a dataset: - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> datasets, next_page_token = client.list_datasets() # API request - >>> [dataset.name for dataset in datasets] - ['dataset_name'] +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_exists] + :end-before: [END dataset_exists] Refresh metadata for a dataset (to pick up changes made by another client): -.. doctest:: +Check for the existence of a dataset: - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.reload() # API request +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_reload] + :end-before: [END dataset_reload] Patch metadata for a dataset: -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> one_day_ms = 24 * 60 * 60 * 1000 - >>> dataset.patch(description='Description goes here', - ... default_table_expiration_ms=one_day_ms) # API request +.. 
literalinclude:: bigquery_snippets.py + :start-after: [START dataset_patch] + :end-before: [END dataset_patch] Replace the ACL for a dataset, and update all writeable fields: @@ -147,12 +112,9 @@ Replace the ACL for a dataset, and update all writeable fields: Delete a dataset: -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.delete() # API request +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_delete] + :end-before: [END dataset_delete] Tables diff --git a/docs/bigquery_snippets.py b/docs/bigquery_snippets.py new file mode 100644 index 000000000000..0a1778efd860 --- /dev/null +++ b/docs/bigquery_snippets.py @@ -0,0 +1,198 @@ +# Copyright 2016 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Testable usage examples for Google Cloud BigQuery API wrapper + +Each example function takes a ``client`` argument (which must be an instance +of :class:`gcloud.bigquery.client.Client`) and uses it to perform a task with +the API. + +To facility running the examples as system tests, each example is also passed +a ``to_delete`` list; the function adds to the list any objects created which +need to be deleted during teardown. 
+""" + +import time + +from gcloud.bigquery.client import Client + + +def snippet(func): + """Mark ``func`` as a snippet example function.""" + func._snippet = True + return func + + +def _millis(): + return time.time() * 1000 + + +@snippet +def client_list_datasets(client, to_delete): # pylint: disable=unused-argument + """List datasets for a project.""" + + def do_something_with(sub): # pylint: disable=unused-argument + pass + + # [START client_list_datasets] + datasets, token = client.list_datasets() # API request + while True: + for dataset in datasets: + do_something_with(dataset) + if token is None: + break + datasets, token = client.list_datasets(page_token=token) # API request + # [END client_list_datasets] + + +@snippet +def dataset_create(client, to_delete): + """Create a dataset.""" + DATASET_NAME = 'dataset_create_%d' % (_millis(),) + + # [START dataset_create] + dataset = client.dataset(DATASET_NAME) + dataset.create() # API request + # [END dataset_create] + + to_delete.append(dataset) + + +@snippet +def dataset_exists(client, to_delete): + """Test existence of a dataset.""" + DATASET_NAME = 'dataset_exists_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + to_delete.append(dataset) + + # [START dataset_exists] + assert not dataset.exists() # API request + dataset.create() # API request + assert dataset.exists() # API request + # [END dataset_exists] + + +@snippet +def dataset_reload(client, to_delete): + """Reload a dataset's metadata.""" + DATASET_NAME = 'dataset_reload_%d' % (_millis(),) + ORIGINAL_DESCRIPTION = 'Original description' + LOCALLY_CHANGED_DESCRIPTION = 'Locally-changed description' + dataset = client.dataset(DATASET_NAME) + dataset.description = ORIGINAL_DESCRIPTION + dataset.create() + to_delete.append(dataset) + + # [START dataset_reload] + assert dataset.description == ORIGINAL_DESCRIPTION + dataset.description = LOCALLY_CHANGED_DESCRIPTION + assert dataset.description == LOCALLY_CHANGED_DESCRIPTION + dataset.reload() # API request + assert dataset.description == ORIGINAL_DESCRIPTION + # [END dataset_reload] + + +@snippet +def dataset_patch(client, to_delete): + """Patch a dataset's metadata.""" + DATASET_NAME = 'dataset_patch_%d' % (_millis(),) + ORIGINAL_DESCRIPTION = 'Original description' + PATCHED_DESCRIPTION = 'Patched description' + dataset = client.dataset(DATASET_NAME) + dataset.description = ORIGINAL_DESCRIPTION + dataset.create() + to_delete.append(dataset) + + # [START dataset_patch] + ONE_DAY_MS = 24 * 60 * 60 * 1000 + assert dataset.description == ORIGINAL_DESCRIPTION + dataset.patch( + description=PATCHED_DESCRIPTION, + default_table_expiration_ms=ONE_DAY_MS + ) # API request + assert dataset.description == PATCHED_DESCRIPTION + assert dataset.default_table_expiration_ms == ONE_DAY_MS + # [END dataset_patch] + + +@snippet +def dataset_update(client, to_delete): + """Update a dataset's metadata.""" + DATASET_NAME = 'dataset_update_%d' % (_millis(),) + ORIGINAL_DESCRIPTION = 'Original description' + UPDATED_DESCRIPTION = 'Updated description' + dataset = client.dataset(DATASET_NAME) + dataset.description = ORIGINAL_DESCRIPTION + dataset.create() + to_delete.append(dataset) + dataset.reload() + + # [START dataset_update] + from gcloud.bigquery import AccessGrant + assert dataset.description == ORIGINAL_DESCRIPTION + assert dataset.default_table_expiration_ms == None + grant = AccessGrant( + role='READER', entity_type='domain', entity_id='example.com') + assert grant not in dataset.access_grants + ONE_DAY_MS = 24 * 60 * 60 * 1000 + 
dataset.description = UPDATED_DESCRIPTION + dataset.default_table_expiration_ms = ONE_DAY_MS + grants = list(dataset.access_grants) + grants.append(grant) + dataset.access_grants = grants + dataset.update() # API request + assert dataset.description == UPDATED_DESCRIPTION + assert dataset.default_table_expiration_ms == ONE_DAY_MS + assert grant in dataset.access_grants + # [END dataset_update] + + +@snippet +def dataset_delete(client, to_delete): # pylint: disable=unused-argument + """Delete a dataset.""" + DATASET_NAME = 'dataset_delete_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.create() + + # [START dataset_delete] + assert dataset.exists() # API request + dataset.delete() + assert not dataset.exists() # API request + # [END dataset_delete] + + +def _find_examples(): + funcs = [obj for obj in globals().values() + if getattr(obj, '_snippet', False)] + for func in sorted(funcs, key=lambda f: f.func_code.co_firstlineno): + yield func + + +def main(): + client = Client() + for example in _find_examples(): + to_delete = [] + print('%-25s: %s' % ( + example.func_name, example.func_doc)) + try: + example(client, to_delete) + except AssertionError as e: + print(' FAIL: %s' % (e,)) + except Exception as e: # pylint: disable=broad-except + print(' ERROR: %r' % (e,)) + for item in to_delete: + item.delete() + +if __name__ == '__main__': + main() From 3afd95b4d0a19b67f9d498cb7c4ede87c173c099 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 27 May 2016 22:39:22 -0400 Subject: [PATCH 002/103] Appease new flake8 checker. --- docs/bigquery_snippets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/bigquery_snippets.py b/docs/bigquery_snippets.py index 0a1778efd860..fe037efde027 100644 --- a/docs/bigquery_snippets.py +++ b/docs/bigquery_snippets.py @@ -141,7 +141,7 @@ def dataset_update(client, to_delete): # [START dataset_update] from gcloud.bigquery import AccessGrant assert dataset.description == ORIGINAL_DESCRIPTION - assert dataset.default_table_expiration_ms == None + assert dataset.default_table_expiration_ms is None grant = AccessGrant( role='READER', entity_type='domain', entity_id='example.com') assert grant not in dataset.access_grants From 8f00567e30f415900aae1e37b91bf1229745cf70 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 7 Jun 2016 12:43:25 -0400 Subject: [PATCH 003/103] Docstring typo. Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1832#discussion_r66012177 --- docs/bigquery_snippets.py | 2 +- docs/pubsub_snippets.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/bigquery_snippets.py b/docs/bigquery_snippets.py index fe037efde027..923d3fa12b50 100644 --- a/docs/bigquery_snippets.py +++ b/docs/bigquery_snippets.py @@ -18,7 +18,7 @@ of :class:`gcloud.bigquery.client.Client`) and uses it to perform a task with the API. -To facility running the examples as system tests, each example is also passed +To facilitate running the examples as system tests, each example is also passed a ``to_delete`` list; the function adds to the list any objects created which need to be deleted during teardown. """ diff --git a/docs/pubsub_snippets.py b/docs/pubsub_snippets.py index f21a35538853..cd0450e7281d 100644 --- a/docs/pubsub_snippets.py +++ b/docs/pubsub_snippets.py @@ -18,7 +18,7 @@ of :class:`gcloud.pubsub.client.Client`) and uses it to perform a task with the API. 
-To facility running the examples as system tests, each example is also passed +To facilitate running the examples as system tests, each example is also passed a ``to_delete`` list; the function adds to the list any objects created which need to be deleted during teardown. """ From 6afb58a366bd88916afc2ff3debd1987a432624b Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 7 Jun 2016 12:44:52 -0400 Subject: [PATCH 004/103] Remove stray line. Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1832#discussion-diff-66012361 --- docs/bigquery-usage.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/bigquery-usage.rst b/docs/bigquery-usage.rst index 3e6a2e88a0fb..8f74fbe34ae4 100644 --- a/docs/bigquery-usage.rst +++ b/docs/bigquery-usage.rst @@ -85,8 +85,6 @@ Check for the existence of a dataset: Refresh metadata for a dataset (to pick up changes made by another client): -Check for the existence of a dataset: - .. literalinclude:: bigquery_snippets.py :start-after: [START dataset_reload] :end-before: [END dataset_reload] From 9d9280816f24b74e0bce926ab73585950dc39b7a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 7 Jun 2016 15:31:32 -0400 Subject: [PATCH 005/103] Use Sphinx's ':envvar:' role. --- docs/bigquery-usage.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/bigquery-usage.rst b/docs/bigquery-usage.rst index 8f74fbe34ae4..3f8546aa171e 100644 --- a/docs/bigquery-usage.rst +++ b/docs/bigquery-usage.rst @@ -16,8 +16,8 @@ Authentication / Configuration and :meth:`from_service_account_p12 `. -- After setting ``GOOGLE_APPLICATION_CREDENTIALS`` and ``GCLOUD_PROJECT`` - environment variables, create an instance of +- After setting :envvar:`GOOGLE_APPLICATION_CREDENTIALS` and + :envvar:`GCLOUD_PROJECT` environment variables, create an instance of :class:`Client `. .. doctest:: From 47c2f0efd7739e796b6b3ef234cc5e2daf87467d Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 22 Jun 2016 17:27:02 -0400 Subject: [PATCH 006/103] _LoggingAPI: impedance matching with JSON API. 
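
For context, the change below reshapes the protobuf log entries returned by the GAX
transport into the camelCase mappings that the JSON API produces. Roughly, the
conversion looks like the following sketch; the function names here are illustrative
stand-ins only, while the real code (shown in the diff) relies on
gcloud._helpers._pb_timestamp_to_datetime and _datetime_to_rfc3339:

    from datetime import datetime, timedelta

    def pb_timestamp_to_rfc3339(timestamp_pb):
        # Sketch only: google.protobuf.Timestamp carries integer 'seconds'
        # and 'nanos'; render them as an RFC3339 string.
        stamp = (datetime.utcfromtimestamp(timestamp_pb.seconds) +
                 timedelta(microseconds=timestamp_pb.nanos // 1000))
        return stamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ')

    def log_entry_pb_to_mapping(entry_pb):
        # Rename the snake_case protobuf fields to the camelCase keys
        # used by the JSON API (only a few fields shown here).
        return {
            'logName': entry_pb.log_name,
            'insertId': entry_pb.insert_id,
            'timestamp': pb_timestamp_to_rfc3339(entry_pb.timestamp),
            'textPayload': entry_pb.text_payload,
        }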
--- gcloud/logging/_gax.py | 34 ++++++++++++++++++++------- gcloud/logging/test__gax.py | 46 ++++++++++++++++++++++++++----------- 2 files changed, 58 insertions(+), 22 deletions(-) diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py index 88780d92326c..ef262f2ff095 100644 --- a/gcloud/logging/_gax.py +++ b/gcloud/logging/_gax.py @@ -32,6 +32,8 @@ from gcloud.exceptions import Conflict from gcloud.exceptions import NotFound from gcloud._helpers import _datetime_to_pb_timestamp +from gcloud._helpers import _datetime_to_rfc3339 +from gcloud._helpers import _pb_timestamp_to_datetime class _LoggingAPI(object): @@ -397,6 +399,22 @@ def _build_paging_options(page_token=None): return CallOptions(**options) +def _mon_resource_pb_to_mapping(resource_pb): + """Helper for :func:_log_entry_pb_to_mapping""" + mapping = { + 'type': resource_pb.type, + } + if resource_pb.labels: + mapping['labels'] = resource_pb.labels + return mapping + + +def _pb_timestamp_to_rfc3339(timestamp_pb): + """Helper for :func:_log_entry_pb_to_mapping""" + timestamp = _pb_timestamp_to_datetime(timestamp_pb) + return _datetime_to_rfc3339(timestamp) + + def _log_entry_pb_to_mapping(entry_pb): """Helper for :meth:`list_entries`, et aliae @@ -405,20 +423,20 @@ def _log_entry_pb_to_mapping(entry_pb): https://github.com/google/protobuf/issues/1351 """ mapping = { - 'log_name': entry_pb.log_name, - 'resource': entry_pb.resource, + 'logName': entry_pb.log_name, + 'resource': _mon_resource_pb_to_mapping(entry_pb.resource), 'severity': entry_pb.severity, - 'insert_id': entry_pb.insert_id, - 'timestamp': entry_pb.timestamp, + 'insertId': entry_pb.insert_id, + 'timestamp': _pb_timestamp_to_rfc3339(entry_pb.timestamp), 'labels': entry_pb.labels, - 'text_payload': entry_pb.text_payload, - 'json_payload': entry_pb.json_payload, - 'proto_payload': entry_pb.proto_payload, + 'textPayload': entry_pb.text_payload, + 'jsonPayload': entry_pb.json_payload, + 'protoPayload': entry_pb.proto_payload, } if entry_pb.http_request: request = entry_pb.http_request - mapping['http_request'] = { + mapping['httpRequest'] = { 'request_method': request.request_method, 'request_url': request.request_url, 'status': request.status, diff --git a/gcloud/logging/test__gax.py b/gcloud/logging/test__gax.py index 773384fc431f..51e8fcb06f4d 100644 --- a/gcloud/logging/test__gax.py +++ b/gcloud/logging/test__gax.py @@ -64,9 +64,9 @@ def test_list_entries_no_paging(self): self.assertEqual(len(entries), 1) entry = entries[0] self.assertIsInstance(entry, dict) - self.assertEqual(entry['log_name'], self.LOG_NAME) + self.assertEqual(entry['logName'], self.LOG_NAME) self.assertEqual(entry['resource'], {'type': 'global'}) - self.assertEqual(entry['text_payload'], TEXT) + self.assertEqual(entry['textPayload'], TEXT) self.assertEqual(next_token, TOKEN) projects, filter_, order_by, page_size, options = ( @@ -94,9 +94,9 @@ def test_list_entries_with_paging(self): self.assertEqual(len(entries), 1) entry = entries[0] self.assertIsInstance(entry, dict) - self.assertEqual(entry['log_name'], self.LOG_NAME) + self.assertEqual(entry['logName'], self.LOG_NAME) self.assertEqual(entry['resource'], {'type': 'global'}) - self.assertEqual(entry['json_payload'], PAYLOAD) + self.assertEqual(entry['jsonPayload'], PAYLOAD) self.assertEqual(next_token, NEW_TOKEN) projects, filter_, order_by, page_size, options = ( @@ -108,7 +108,12 @@ def test_list_entries_with_paging(self): self.assertEqual(options.page_token, TOKEN) def test_list_entries_with_extra_properties(self): + from datetime import 
datetime from gcloud._testing import _GAXPageIterator + from gcloud._helpers import UTC + from gcloud._helpers import _datetime_to_rfc3339 + from gcloud._helpers import _datetime_to_pb_timestamp + NOW = datetime.utcnow().replace(tzinfo=UTC) SIZE = 23 TOKEN = 'TOKEN' NEW_TOKEN = 'NEW_TOKEN' @@ -128,6 +133,8 @@ def test_list_entries_with_extra_properties(self): 'operation': operation, } ENTRY = _LogEntryPB(self.LOG_NAME, proto_payload=PAYLOAD, **EXTRAS) + ENTRY.resource.labels['foo'] = 'bar' + ENTRY.timestamp = _datetime_to_pb_timestamp(NOW) response = _GAXPageIterator([ENTRY], NEW_TOKEN) gax_api = _GAXLoggingAPI(_list_log_entries_response=response) api = self._makeOne(gax_api) @@ -138,12 +145,14 @@ def test_list_entries_with_extra_properties(self): self.assertEqual(len(entries), 1) entry = entries[0] self.assertIsInstance(entry, dict) - self.assertEqual(entry['log_name'], self.LOG_NAME) - self.assertEqual(entry['resource'], {'type': 'global'}) - self.assertEqual(entry['proto_payload'], PAYLOAD) + self.assertEqual(entry['logName'], self.LOG_NAME) + self.assertEqual(entry['resource'], + {'type': 'global', 'labels': {'foo': 'bar'}}) + self.assertEqual(entry['protoPayload'], PAYLOAD) self.assertEqual(entry['severity'], SEVERITY) self.assertEqual(entry['labels'], LABELS) - self.assertEqual(entry['insert_id'], IID) + self.assertEqual(entry['insertId'], IID) + self.assertEqual(entry['timestamp'], _datetime_to_rfc3339(NOW)) EXPECTED_REQUEST = { 'request_method': request.request_method, 'request_url': request.request_url, @@ -155,7 +164,7 @@ def test_list_entries_with_extra_properties(self): 'remote_ip': request.remote_ip, 'cache_hit': request.cache_hit, } - self.assertEqual(entry['http_request'], EXPECTED_REQUEST) + self.assertEqual(entry['httpRequest'], EXPECTED_REQUEST) EXPECTED_OPERATION = { 'producer': operation.producer, 'id': operation.id, @@ -302,17 +311,19 @@ def test_write_entries_w_extra_properties(self): def test_write_entries_multiple(self): # pylint: disable=too-many-statements + import datetime from google.logging.type.log_severity_pb2 import WARNING from google.logging.v2.log_entry_pb2 import LogEntry from google.protobuf.any_pb2 import Any from google.protobuf.struct_pb2 import Struct + from gcloud._helpers import _datetime_to_rfc3339, UTC TEXT = 'TEXT' - TIMESTAMP = _LogEntryPB._make_timestamp() + NOW = datetime.datetime.utcnow().replace(tzinfo=UTC) TIMESTAMP_TYPE_URL = 'type.googleapis.com/google.protobuf.Timestamp' JSON = {'payload': 'PAYLOAD', 'type': 'json'} PROTO = { '@type': TIMESTAMP_TYPE_URL, - 'value': TIMESTAMP, + 'value': _datetime_to_rfc3339(NOW), } PRODUCER = 'PRODUCER' OPID = 'OPID' @@ -956,6 +967,13 @@ class _LogEntryOperationPB(object): id = 'OPID' +class _ResourcePB(object): + + def __init__(self, type_='global', **labels): + self.type = type_ + self.labels = labels + + class _LogEntryPB(object): severity = 'DEFAULT' @@ -964,7 +982,7 @@ class _LogEntryPB(object): def __init__(self, log_name, **kw): self.log_name = log_name - self.resource = {'type': 'global'} + self.resource = _ResourcePB() self.timestamp = self._make_timestamp() self.labels = kw.pop('labels', {}) self.__dict__.update(kw) @@ -973,9 +991,9 @@ def __init__(self, log_name, **kw): def _make_timestamp(): from datetime import datetime from gcloud._helpers import UTC - from gcloud.logging.test_entries import _datetime_to_rfc3339_w_nanos + from gcloud._helpers import _datetime_to_pb_timestamp NOW = datetime.utcnow().replace(tzinfo=UTC) - return _datetime_to_rfc3339_w_nanos(NOW) + return 
_datetime_to_pb_timestamp(NOW) class _LogSinkPB(object): From a4022b6ba097f4528846e97dbfd4d35bfe283674 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 23 Jun 2016 12:12:52 -0400 Subject: [PATCH 007/103] Allow logging-over-gRPC. Available when 'grpcio' and related packages are importable. Enable via 'GCLOUD_ENABLE_GAX' environment variable. Disabled by default due to #1873. --- gcloud/logging/client.py | 47 +++++++++++-- gcloud/logging/test_client.py | 120 ++++++++++++++++++++++++++++++++-- 2 files changed, 155 insertions(+), 12 deletions(-) diff --git a/gcloud/logging/client.py b/gcloud/logging/client.py index d776a594cbfe..8b4aae0bdf46 100644 --- a/gcloud/logging/client.py +++ b/gcloud/logging/client.py @@ -14,11 +14,31 @@ """Client for interacting with the Google Cloud Logging API.""" +import os + +try: + from google.logging.v2.config_service_v2_api import ( + ConfigServiceV2Api as GeneratedSinksAPI) + from google.logging.v2.logging_service_v2_api import ( + LoggingServiceV2Api as GeneratedLoggingAPI) + from google.logging.v2.metrics_service_v2_api import ( + MetricsServiceV2Api as GeneratedMetricsAPI) + from gcloud.logging._gax import _LoggingAPI as GAXLoggingAPI + from gcloud.logging._gax import _MetricsAPI as GAXMetricsAPI + from gcloud.logging._gax import _SinksAPI as GAXSinksAPI +except ImportError: # pragma: NO COVER + _HAVE_GAX = False + GeneratedLoggingAPI = GAXLoggingAPI = None + GeneratedMetricsAPI = GAXMetricsAPI = None + GeneratedSinksAPI = GAXSinksAPI = None +else: + _HAVE_GAX = True + from gcloud.client import JSONClient from gcloud.logging.connection import Connection -from gcloud.logging.connection import _LoggingAPI -from gcloud.logging.connection import _MetricsAPI -from gcloud.logging.connection import _SinksAPI +from gcloud.logging.connection import _LoggingAPI as JSONLoggingAPI +from gcloud.logging.connection import _MetricsAPI as JSONMetricsAPI +from gcloud.logging.connection import _SinksAPI as JSONSinksAPI from gcloud.logging.entries import ProtobufEntry from gcloud.logging.entries import StructEntry from gcloud.logging.entries import TextEntry @@ -27,6 +47,9 @@ from gcloud.logging.sink import Sink +_USE_GAX = _HAVE_GAX and (os.environ.get('GCLOUD_ENABLE_GAX') is not None) + + class Client(JSONClient): """Client to bundle configuration needed for API requests. 
@@ -60,7 +83,11 @@ def logging_api(self): https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs """ if self._logging_api is None: - self._logging_api = _LoggingAPI(self.connection) + if _USE_GAX: + generated = GeneratedLoggingAPI() + self._logging_api = GAXLoggingAPI(generated) + else: + self._logging_api = JSONLoggingAPI(self.connection) return self._logging_api @property @@ -71,7 +98,11 @@ def sinks_api(self): https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks """ if self._sinks_api is None: - self._sinks_api = _SinksAPI(self.connection) + if _USE_GAX: + generated = GeneratedSinksAPI() + self._sinks_api = GAXSinksAPI(generated) + else: + self._sinks_api = JSONSinksAPI(self.connection) return self._sinks_api @property @@ -82,7 +113,11 @@ def metrics_api(self): https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics """ if self._metrics_api is None: - self._metrics_api = _MetricsAPI(self.connection) + if _USE_GAX: + generated = GeneratedMetricsAPI() + self._metrics_api = GAXMetricsAPI(generated) + else: + self._metrics_api = JSONMetricsAPI(self.connection) return self._metrics_api def logger(self, name): diff --git a/gcloud/logging/test_client.py b/gcloud/logging/test_client.py index 4a5b8c7c205d..4d42a6b7ca06 100644 --- a/gcloud/logging/test_client.py +++ b/gcloud/logging/test_client.py @@ -38,39 +38,147 @@ def test_ctor(self): client = self._makeOne(project=self.PROJECT, credentials=creds) self.assertEqual(client.project, self.PROJECT) - def test_logging_api(self): + def test_logging_api_wo_gax(self): from gcloud.logging.connection import _LoggingAPI + from gcloud.logging import client as MUT + from gcloud._testing import _Monkey client = self._makeOne(self.PROJECT, credentials=_Credentials()) conn = client.connection = object() - api = client.logging_api + + with _Monkey(MUT, _USE_GAX=False): + api = client.logging_api + self.assertTrue(isinstance(api, _LoggingAPI)) self.assertTrue(api._connection is conn) # API instance is cached again = client.logging_api self.assertTrue(again is api) - def test_sinks_api(self): + def test_logging_api_w_gax(self): + from gcloud.logging import client as MUT + from gcloud._testing import _Monkey + + wrapped = object() + _called_with = [] + + def _generated_api(*args, **kw): + _called_with.append((args, kw)) + return wrapped + + class _GaxLoggingAPI(object): + + def __init__(self, _wrapped): + self._wrapped = _wrapped + + creds = _Credentials() + client = self._makeOne(project=self.PROJECT, credentials=creds) + + with _Monkey(MUT, + _USE_GAX=True, + GeneratedLoggingAPI=_generated_api, + GAXLoggingAPI=_GaxLoggingAPI): + api = client.logging_api + + self.assertIsInstance(api, _GaxLoggingAPI) + self.assertTrue(api._wrapped is wrapped) + # API instance is cached + again = client.logging_api + self.assertTrue(again is api) + + def test_sinks_api_wo_gax(self): from gcloud.logging.connection import _SinksAPI + from gcloud.logging import client as MUT + from gcloud._testing import _Monkey client = self._makeOne(self.PROJECT, credentials=_Credentials()) conn = client.connection = object() - api = client.sinks_api + + with _Monkey(MUT, _USE_GAX=False): + api = client.sinks_api + self.assertTrue(isinstance(api, _SinksAPI)) self.assertTrue(api._connection is conn) # API instance is cached again = client.sinks_api self.assertTrue(again is api) - def test_metrics_api(self): + def test_sinks_api_w_gax(self): + from gcloud.logging import client as MUT + from gcloud._testing import 
_Monkey + + wrapped = object() + _called_with = [] + + def _generated_api(*args, **kw): + _called_with.append((args, kw)) + return wrapped + + class _GaxSinksAPI(object): + + def __init__(self, _wrapped): + self._wrapped = _wrapped + + creds = _Credentials() + client = self._makeOne(project=self.PROJECT, credentials=creds) + + with _Monkey(MUT, + _USE_GAX=True, + GeneratedSinksAPI=_generated_api, + GAXSinksAPI=_GaxSinksAPI): + api = client.sinks_api + + self.assertIsInstance(api, _GaxSinksAPI) + self.assertTrue(api._wrapped is wrapped) + # API instance is cached + again = client.sinks_api + self.assertTrue(again is api) + + def test_metrics_api_wo_gax(self): from gcloud.logging.connection import _MetricsAPI + from gcloud.logging import client as MUT + from gcloud._testing import _Monkey client = self._makeOne(self.PROJECT, credentials=_Credentials()) conn = client.connection = object() - api = client.metrics_api + + with _Monkey(MUT, _USE_GAX=False): + api = client.metrics_api + self.assertTrue(isinstance(api, _MetricsAPI)) self.assertTrue(api._connection is conn) # API instance is cached again = client.metrics_api self.assertTrue(again is api) + def test_metrics_api_w_gax(self): + from gcloud.logging import client as MUT + from gcloud._testing import _Monkey + + wrapped = object() + _called_with = [] + + def _generated_api(*args, **kw): + _called_with.append((args, kw)) + return wrapped + + class _GaxMetricsAPI(object): + + def __init__(self, _wrapped): + self._wrapped = _wrapped + + creds = _Credentials() + client = self._makeOne(project=self.PROJECT, credentials=creds) + + with _Monkey(MUT, + _USE_GAX=True, + GeneratedMetricsAPI=_generated_api, + GAXMetricsAPI=_GaxMetricsAPI): + api = client.metrics_api + + self.assertIsInstance(api, _GaxMetricsAPI) + self.assertTrue(api._wrapped is wrapped) + # API instance is cached + again = client.metrics_api + self.assertTrue(again is api) + def test_logger(self): from gcloud.logging.logger import Logger creds = _Credentials() From da7cd1f7d61644f51b4137ccda4e3a6c1b5a61b7 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 24 Jun 2016 14:47:14 -0400 Subject: [PATCH 008/103] Adjust anchor scroll. closes #1897. --- docs/_static/css/main.css | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/_static/css/main.css b/docs/_static/css/main.css index 1cc82f948563..6b2e6d0e1903 100644 --- a/docs/_static/css/main.css +++ b/docs/_static/css/main.css @@ -23,6 +23,14 @@ html { line-height: 1.4; } +*[id]:before { + display: block; + content: " "; + margin-top: -70px; + height: 70px; + visibility: hidden; +} + /* * Remove text-shadow in selection highlight: h5bp.com/i * These selection rule sets have to be separate. From 033759041a50bc1ec4e554b3692f3eba09c5c339 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 24 Jun 2016 15:05:00 -0400 Subject: [PATCH 009/103] Add Logging to README.md. closes #1898. --- README.rst | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/README.rst b/README.rst index 0cab79fee119..faff3287a049 100644 --- a/README.rst +++ b/README.rst @@ -20,12 +20,14 @@ This client supports the following Google Cloud Platform services: - `Google Cloud Pub/Sub`_ - `Google BigQuery`_ - `Google Cloud Resource Manager`_ +- `Google Cloud Logging`_ .. _Google Cloud Datastore: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-datastore .. _Google Cloud Storage: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-storage .. 
_Google Cloud Pub/Sub: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-pubsub .. _Google BigQuery: https://github.com/GoogleCloudPlatform/gcloud-python#google-bigquery .. _Google Cloud Resource Manager: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-resource-manager +.. _Google Cloud Logging: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-logging If you need support for other Google APIs, check out the `Google APIs Python Client library`_. @@ -194,6 +196,28 @@ manage projects using this Client Library. .. _Resource Manager documentation: https://googlecloudplatform.github.io/gcloud-python/stable/resource-manager-api.html +Google Cloud Logging +-------------------- + +`Stackdriver Logging`_ API (`Logging API docs`_) allows you to store, search, +analyze, monitor, and alert on log data and events from Google Cloud Platform. + +.. _Stackdriver Logging: https://cloud.google.com/logging/ +.. _Logging API docs: https://cloud.google.com/logging/docs/ + +.. code:: python + + from gcloud import logging + client = logging.Client() + logger = client.logger('log_name') + logger.log_text("A simple entry") # API call + + +See the ``gcloud-python`` API `logging documentation`_ to learn how to connect +to Cloud logging using this Client Library. + +.. _logging documentation: https://googlecloudplatform.github.io/gcloud-python/stable/logging-usage.html + Contributing ------------ From 43d9637d5949879e296cbd5b26751c26c37b8bf8 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 24 Jun 2016 15:09:31 -0400 Subject: [PATCH 010/103] Add BigQuery example to README.md. closes #1899 --- README.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.rst b/README.rst index 0cab79fee119..b200cd009c63 100644 --- a/README.rst +++ b/README.rst @@ -174,6 +174,12 @@ append-only tables, using the processing power of Google's infrastructure. This package is still being implemented, but it is almost complete! +.. code:: python + from gcloud import bigquery + client = bigquery.Client() + dataset = client.dataset('dataset_name') + dataset.create() # API request + See the ``gcloud-python`` API `BigQuery documentation`_ to learn how to connect to BigQuery using this Client Library. From a43b2eb98406cd747eae46c62d9f1a2755615c8f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 24 Jun 2016 15:59:23 -0400 Subject: [PATCH 011/103] Split 'Makefile' into datastore and bigtable portions. Pass the 'PROTOC_CMD' and 'GRPC_PLUGIN' values through from make's environment to the scripts used to pick apart GRPC-specific code. 
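
A rough sketch of the passthrough described above, assuming a hypothetical
run_protoc helper: the Makefiles export PROTOC_CMD and GRPC_PLUGIN, and the
generation scripts read them from the environment, falling back to the stock
binaries when unset (the diff below makes exactly this change in
scripts/make_datastore_grpc.py and scripts/make_operations_grpc.py):

    import os
    import subprocess

    # Fall back to the stock binaries when the variables are unset, as the
    # generation scripts now do.
    PROTOC_CMD = os.environ.get('PROTOC_CMD', 'protoc')
    GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin')

    def run_protoc(proto_path, out_dir, proto_files):
        # Hypothetical helper: invoke protoc with the configured gRPC plugin,
        # mirroring the flags used in the Makefile recipes.
        subprocess.check_call([
            PROTOC_CMD,
            '--proto_path=%s' % (proto_path,),
            '--python_out=%s' % (out_dir,),
            '--plugin=protoc-gen-grpc=%s' % (GRPC_PLUGIN,),
            '--grpc_out=%s' % (out_dir,),
        ] + list(proto_files))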
--- Makefile => Makefile.bigtable_v1 | 30 +++++------------- Makefile.datastore | 53 ++++++++++++++++++++++++++++++++ scripts/make_datastore_grpc.py | 5 ++- scripts/make_operations_grpc.py | 4 +-- 4 files changed, 65 insertions(+), 27 deletions(-) rename Makefile => Makefile.bigtable_v1 (72%) create mode 100644 Makefile.datastore diff --git a/Makefile b/Makefile.bigtable_v1 similarity index 72% rename from Makefile rename to Makefile.bigtable_v1 index 3e7e63e6e26a..a1a85f1b18a4 100644 --- a/Makefile +++ b/Makefile.bigtable_v1 @@ -1,9 +1,9 @@ GENERATED_DIR=$(shell pwd)/generated_python BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/_generated -DATASTORE_DIR=$(shell pwd)/gcloud/datastore/_generated GRPC_PLUGIN=grpc_python_plugin PROTOC_CMD=protoc -BIGTABLE_PROTOS_DIR=$(shell pwd)/cloud-bigtable-client/bigtable-protos/src/main/proto +BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client +BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-protos/src/main/proto GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb help: @@ -15,8 +15,8 @@ help: generate: # Retrieve git repos that have our *.proto files. - [ -d cloud-bigtable-client ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 - cd cloud-bigtable-client && git pull origin master + [ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 + cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 cd googleapis-pb && git pull origin master # Make the directory where our *_pb2.py files will go. @@ -41,23 +41,17 @@ generate: $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_data.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto \ - $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/datastore.proto \ - $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/entity.proto \ - $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/query.proto # Move the newly generated *_pb2.py files into our library. - mv $(GENERATED_DIR)/google/bigtable/v1/* $(BIGTABLE_DIR) - mv $(GENERATED_DIR)/google/bigtable/admin/cluster/v1/* $(BIGTABLE_DIR) - mv $(GENERATED_DIR)/google/bigtable/admin/table/v1/* $(BIGTABLE_DIR) - mv $(GENERATED_DIR)/google/datastore/v1beta3/* $(DATASTORE_DIR) + cp $(GENERATED_DIR)/google/bigtable/v1/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/cluster/v1/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/table/v1/* $(BIGTABLE_DIR) # Remove all existing *.proto files before we replace rm -f $(BIGTABLE_DIR)/*.proto - rm -f $(DATASTORE_DIR)/*.proto # Copy over the *.proto files into our library. cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) - cp $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/*.proto $(DATASTORE_DIR) # Rename all *.proto files in our library with an # underscore and remove executable bit. 
cd $(BIGTABLE_DIR) && \ @@ -65,18 +59,10 @@ generate: chmod -x $$filename ; \ mv $$filename _$$filename ; \ done - cd $(DATASTORE_DIR) && \ - for filename in *.proto; do \ - chmod -x $$filename ; \ - mv $$filename _$$filename ; \ - done # Separate the gRPC parts of the operations service from the # non-gRPC parts so that the protos from `googleapis-common-protos` # can be used without gRPC. python scripts/make_operations_grpc.py - # Separate the gRPC parts of the datastore service from the - # non-gRPC parts so that the protos can be used without gRPC. - python scripts/make_datastore_grpc.py # Rewrite the imports in the generated *_pb2.py files. python scripts/rewrite_imports.py @@ -84,6 +70,6 @@ check_generate: python scripts/check_generate.py clean: - rm -fr cloud-bigtable-client $(GENERATED_DIR) + rm -fr $(BIGTABLE_CHECKOUT_DIR) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) .PHONY: generate check_generate clean diff --git a/Makefile.datastore b/Makefile.datastore new file mode 100644 index 000000000000..0cb26bebc5cf --- /dev/null +++ b/Makefile.datastore @@ -0,0 +1,53 @@ +GENERATED_DIR=$(shell pwd)/generated_python +DATASTORE_DIR=$(shell pwd)/gcloud/datastore/_generated +GRPC_PLUGIN=grpc_python_plugin +PROTOC_CMD=protoc +GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb + +help: + @echo 'Makefile for gcloud-python Bigtable protos ' + @echo ' ' + @echo ' make generate Generates the protobuf modules ' + @echo ' make check_generate Checks that generate succeeded ' + @echo ' make clean Clean generated files ' + +generate: + # Retrieve git repos that have our *.proto files. + [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 + cd googleapis-pb && git pull origin master + # Make the directory where our *_pb2.py files will go. + mkdir -p $(GENERATED_DIR) + # Generate all *_pb2.py files that do not require gRPC. + $(PROTOC_CMD) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ + --python_out=$(GENERATED_DIR) \ + $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/datastore.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/entity.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/query.proto + # Move the newly generated *_pb2.py files into our library. + cp $(GENERATED_DIR)/google/datastore/v1beta3/* $(DATASTORE_DIR) + # Remove all existing *.proto files before we replace + rm -f $(DATASTORE_DIR)/*.proto + # Copy over the *.proto files into our library. + cp $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/*.proto $(DATASTORE_DIR) + # Rename all *.proto files in our library with an + # underscore and remove executable bit. + cd $(DATASTORE_DIR) && \ + for filename in *.proto; do \ + chmod -x $$filename ; \ + mv $$filename _$$filename ; \ + done + # Separate the gRPC parts of the datastore service from the + # non-gRPC parts so that the protos can be used without gRPC. + PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ + python scripts/make_datastore_grpc.py + # Rewrite the imports in the generated *_pb2.py files. 
+ python scripts/rewrite_imports.py + +check_generate: + python scripts/check_generate.py + +clean: + rm -fr $(GENERATED_DIR) + +.PHONY: generate check_generate clean diff --git a/scripts/make_datastore_grpc.py b/scripts/make_datastore_grpc.py index 1de717c4a08c..30f0f4e47adb 100644 --- a/scripts/make_datastore_grpc.py +++ b/scripts/make_datastore_grpc.py @@ -28,9 +28,8 @@ 'v1beta3', 'datastore.proto') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'datastore', '_generated', 'datastore_grpc_pb2.py') -PROTOC_CMD = 'protoc' -GRPC_PLUGIN = 'grpc_python_plugin' - +PROTOC_CMD = os.environ.get('PROTOC_CMD', 'protoc') +GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin') def get_pb2_contents_with_grpc(): """Get pb2 lines generated by protoc with gRPC plugin. diff --git a/scripts/make_operations_grpc.py b/scripts/make_operations_grpc.py index 65b877250594..109751680788 100644 --- a/scripts/make_operations_grpc.py +++ b/scripts/make_operations_grpc.py @@ -29,8 +29,8 @@ 'operations.proto') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'bigtable', '_generated', 'operations_grpc_pb2.py') -PROTOC_CMD = 'protoc' -GRPC_PLUGIN = 'grpc_python_plugin' +PROTOC_CMD = os.environ.get('PROTOC_CMD', 'protoc') +GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin') def get_pb2_contents_with_grpc(): From 0f4a0641ee847c69b09b5b6bfd59169b67cac0f9 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 24 Jun 2016 16:51:11 -0400 Subject: [PATCH 012/103] Generate Bigtable V2 protos. Put them in a separate '_generated_v2' subdir, to ease migration. Attempt to factor generation process for clarity (toward #1482). --- Makefile.bigtable_v1 | 9 +- Makefile.bigtable_v2 | 75 + Makefile.datastore | 2 +- gcloud/bigtable/_generated_v2/__init__.py | 15 + gcloud/bigtable/_generated_v2/_bigtable.proto | 341 +++++ .../_bigtable_instance_admin.proto | 213 +++ .../_generated_v2/_bigtable_table_admin.proto | 213 +++ gcloud/bigtable/_generated_v2/_common.proto | 33 + gcloud/bigtable/_generated_v2/_data.proto | 529 +++++++ gcloud/bigtable/_generated_v2/_instance.proto | 78 + .../bigtable/_generated_v2/_operations.proto | 144 ++ gcloud/bigtable/_generated_v2/_table.proto | 155 ++ .../bigtable_instance_admin_pb2.py | 616 ++++++++ gcloud/bigtable/_generated_v2/bigtable_pb2.py | 807 +++++++++++ .../_generated_v2/bigtable_table_admin_pb2.py | 507 +++++++ gcloud/bigtable/_generated_v2/common_pb2.py | 67 + gcloud/bigtable/_generated_v2/data_pb2.py | 1260 +++++++++++++++++ gcloud/bigtable/_generated_v2/instance_pb2.py | 222 +++ .../_generated_v2/operations_grpc_pb2.py | 0 gcloud/bigtable/_generated_v2/table_pb2.py | 529 +++++++ scripts/make_datastore_grpc.py | 1 + scripts/make_operations_grpc.py | 3 +- scripts/rewrite_imports.py | 11 +- scripts/run_pylint.py | 1 + tox.ini | 2 +- 25 files changed, 5823 insertions(+), 10 deletions(-) create mode 100644 Makefile.bigtable_v2 create mode 100644 gcloud/bigtable/_generated_v2/__init__.py create mode 100644 gcloud/bigtable/_generated_v2/_bigtable.proto create mode 100644 gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto create mode 100644 gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto create mode 100644 gcloud/bigtable/_generated_v2/_common.proto create mode 100644 gcloud/bigtable/_generated_v2/_data.proto create mode 100644 gcloud/bigtable/_generated_v2/_instance.proto create mode 100644 gcloud/bigtable/_generated_v2/_operations.proto create mode 100644 gcloud/bigtable/_generated_v2/_table.proto create mode 100644 
gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/bigtable_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/common_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/data_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/instance_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/operations_grpc_pb2.py create mode 100644 gcloud/bigtable/_generated_v2/table_pb2.py diff --git a/Makefile.bigtable_v1 b/Makefile.bigtable_v1 index a1a85f1b18a4..a2db4197cbbb 100644 --- a/Makefile.bigtable_v1 +++ b/Makefile.bigtable_v1 @@ -1,5 +1,6 @@ GENERATED_DIR=$(shell pwd)/generated_python -BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/_generated +GENERATED_SUBDIR=_generated +BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) GRPC_PLUGIN=grpc_python_plugin PROTOC_CMD=protoc BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client @@ -62,9 +63,11 @@ generate: # Separate the gRPC parts of the operations service from the # non-gRPC parts so that the protos from `googleapis-common-protos` # can be used without gRPC. - python scripts/make_operations_grpc.py + PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ + python scripts/make_operations_grpc.py # Rewrite the imports in the generated *_pb2.py files. - python scripts/rewrite_imports.py + python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py check_generate: python scripts/check_generate.py diff --git a/Makefile.bigtable_v2 b/Makefile.bigtable_v2 new file mode 100644 index 000000000000..03962bdd127d --- /dev/null +++ b/Makefile.bigtable_v2 @@ -0,0 +1,75 @@ +GENERATED_DIR=$(shell pwd)/generated_python +GENERATED_SUBDIR=_generated_v2 +BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) +GRPC_PLUGIN=grpc_python_plugin +PROTOC_CMD=protoc +BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client +BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-protos/src/main/proto +GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb + +help: + @echo 'Makefile for gcloud-python Bigtable protos ' + @echo ' ' + @echo ' make generate Generates the protobuf modules ' + @echo ' make check_generate Checks that generate succeeded ' + @echo ' make clean Clean generated files ' + +generate: + # Retrieve git repos that have our *.proto files. + [ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 + cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master + [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 + cd googleapis-pb && git pull origin master + # Make the directory where our *_pb2.py files will go. + mkdir -p $(GENERATED_DIR) + # Generate all *_pb2.py files that require gRPC. + $(PROTOC_CMD) \ + --proto_path=$(BIGTABLE_PROTOS_DIR) \ + --python_out=$(GENERATED_DIR) \ + --plugin=protoc-gen-grpc=$(GRPC_PLUGIN) \ + --grpc_out=$(GENERATED_DIR) \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/bigtable.proto \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_instance_admin.proto \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_table_admin.proto + # Generate all *_pb2.py files that do not require gRPC. 
+ $(PROTOC_CMD) \ + --proto_path=$(BIGTABLE_PROTOS_DIR) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ + --python_out=$(GENERATED_DIR) \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/data.proto \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/common.proto \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/instance.proto \ + $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/table.proto \ + # Move the newly generated *_pb2.py files into our library. + cp $(GENERATED_DIR)/google/bigtable/v2/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR) + # Remove all existing *.proto files before we replace + rm -f $(BIGTABLE_DIR)/*.proto + # Copy over the *.proto files into our library. + cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/*.proto $(BIGTABLE_DIR) + cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/*.proto $(BIGTABLE_DIR) + cp $(BIGTABLE_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) + # Rename all *.proto files in our library with an + # underscore and remove executable bit. + cd $(BIGTABLE_DIR) && \ + for filename in *.proto; do \ + chmod -x $$filename ; \ + mv $$filename _$$filename ; \ + done + # Separate the gRPC parts of the operations service from the + # non-gRPC parts so that the protos from `googleapis-common-protos` + # can be used without gRPC. + PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ + python scripts/make_operations_grpc.py + # Rewrite the imports in the generated *_pb2.py files. + python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py + +check_generate: + python scripts/check_generate.py + +clean: + rm -fr $(BIGTABLE_CHECKOUT_DIR) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) + +.PHONY: generate check_generate clean diff --git a/Makefile.datastore b/Makefile.datastore index 0cb26bebc5cf..02f430c53c50 100644 --- a/Makefile.datastore +++ b/Makefile.datastore @@ -42,7 +42,7 @@ generate: PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ python scripts/make_datastore_grpc.py # Rewrite the imports in the generated *_pb2.py files. - python scripts/rewrite_imports.py + python scripts/rewrite_imports.py $(DATASTORE_DIR)/*pb2.py check_generate: python scripts/check_generate.py diff --git a/gcloud/bigtable/_generated_v2/__init__.py b/gcloud/bigtable/_generated_v2/__init__.py new file mode 100644 index 000000000000..ad35adcf05ae --- /dev/null +++ b/gcloud/bigtable/_generated_v2/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generated protobuf modules for Google Cloud Bigtable API.""" diff --git a/gcloud/bigtable/_generated_v2/_bigtable.proto b/gcloud/bigtable/_generated_v2/_bigtable.proto new file mode 100644 index 000000000000..900168773363 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_bigtable.proto @@ -0,0 +1,341 @@ +// Copyright 2016 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/v2/data.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableProto"; +option java_package = "com.google.bigtable.v2"; + + +// Service for reading from and writing to existing Bigtable tables. +// +// Caution: This service is experimental. The details can change and the rpcs +// may or may not be active. +service Bigtable { + // Streams back the contents of all requested rows, optionally + // applying the same Reader filter to each. Depending on their size, + // rows and cells may be broken up across multiple responses, but + // atomicity of each row will still be preserved. See the + // ReadRowsResponse documentation for details. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" }; + } + + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; + } + + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" }; + } + + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" }; + } + + // Mutates a row atomically based on the output of a predicate Reader filter. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. 
+ rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" }; + } + + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. Returns the new contents of all modified cells. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" }; + } +} + +// Request message for Bigtable.ReadRows. +message ReadRowsRequest { + // The unique name of the table from which to read. + // Values are of the form + // projects//instances//tables/ + string table_name = 1; + + // The row keys and/or ranges to read. If not specified, reads from all rows. + RowSet rows = 2; + + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entirety of each row. + RowFilter filter = 3; + + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + int64 rows_limit = 4; +} + +// Response message for Bigtable.ReadRows. +message ReadRowsResponse { + // Specifies a piece of a row's contents returned as part of the read + // response stream. + message CellChunk { + // The row key for this chunk of data. If the row key is empty, + // this CellChunk is a continuation of the same row as the previous + // CellChunk in the response stream, even if that CellChunk was in a + // previous ReadRowsResponse message. + bytes row_key = 1; + + // The column family name for this chunk of data. If this message + // is not present this CellChunk is a continuation of the same column + // family as the previous CellChunk. The empty string can occur as a + // column family name in a response so clients must check + // explicitly for the presence of this message, not just for + // family_name.value being non-empty. + google.protobuf.StringValue family_name = 2; + + // The column qualifier for this chunk of data. If this message + // is not present, this CellChunk is a continuation of the same column + // as the previous CellChunk. Column qualifiers may be empty so + // clients must check for the presence of this message, not just + // for qualifier.value being non-empty. + google.protobuf.BytesValue qualifier = 3; + + // The cell's stored timestamp, which also uniquely identifies it + // within its column. Values are always expressed in + // microseconds, but individual tables may set a coarser + // "granularity" to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will + // only allow values of "timestamp_micros" which are multiples of + // 1000. Timestamps are only set in the first CellChunk per cell + // (for cells split into multiple chunks). + int64 timestamp_micros = 4; + + // Labels applied to the cell by a + // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + // on the first CellChunk per cell. + repeated string labels = 5; + + // The value stored in the cell. Cell values can be split across + // multiple CellChunks. 
In that case only the value field will be + // set in CellChunks after the first: the timestamp and labels + // will only be present in the first CellChunk, even if the first + // CellChunk came in a previous ReadRowsResponse. + bytes value = 6; + + // If this CellChunk is part of a chunked cell value and this is + // not the final chunk of that cell, value_size will be set to the + // total length of the cell value. The client can use this size + // to pre-allocate memory to hold the full cell value. + int32 value_size = 7; + + oneof row_status { + // Indicates that the client should drop all previous chunks for + // "row_key", as it will be re-read from the beginning. + bool reset_row = 8; + + // Indicates that the client can safely process all previous chunks for + // "row_key", as its data has been fully read. + bool commit_row = 9; + } + } + + repeated CellChunk chunks = 1; + + // Optionally the server might return the row key of the last row it + // has scanned. The client can use this to construct a more + // efficient retry request if needed: any row keys or portions of + // ranges less than this row key can be dropped from the request. + // This is primarily useful for cases where the server has read a + // lot of data that was filtered out since the last committed row + // key, allowing the client to skip that work on a retry. + bytes last_scanned_row_key = 2; +} + +// Request message for Bigtable.SampleRowKeys. +message SampleRowKeysRequest { + // The unique name of the table from which to sample row keys. + // Values are of the form + // projects//instances//tables/
+  string table_name = 1;
+}
+
+// Response message for Bigtable.SampleRowKeys.
+message SampleRowKeysResponse {
+  // Sorted streamed sequence of sample row keys in the table. The table might
+  // have contents before the first row key in the list and after the last one,
+  // but a key containing the empty string indicates "end of table" and will be
+  // the last response given, if present.
+  // Note that row keys in this list may not have ever been written to or read
+  // from, and users should therefore not make any assumptions about the row key
+  // structure that are specific to their use case.
+  bytes row_key = 1;
+
+  // Approximate total storage space used by all rows in the table which precede
+  // "row_key". Buffering the contents of all rows between two subsequent
+  // samples would require space roughly equal to the difference in their
+  // "offset_bytes" fields.
+  int64 offset_bytes = 2;
+}
+
+// Request message for Bigtable.MutateRow.
+message MutateRowRequest {
+  // The unique name of the table to which the mutation should be applied.
+  // Values are of the form
+  // projects/<project>/instances/<instance>/tables/<table>
+ string table_name = 1; + + // The key of the row to which the mutation should be applied. + bytes row_key = 2; + + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + repeated Mutation mutations = 3; +} + +// Response message for Bigtable.MutateRow. +message MutateRowResponse { + +} + +// Request message for BigtableService.MutateRows. +message MutateRowsRequest { + message Entry { + // The key of the row to which the `mutations` should be applied. + bytes row_key = 1; + + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // At least one mutation must be specified. + repeated Mutation mutations = 2; + } + + // The unique name of the table to which the mutations should be applied. + string table_name = 1; + + // The row keys/mutations to be applied in bulk. + // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries may + // contain at most 100000 mutations. + repeated Entry entries = 2; +} + +// Response message for BigtableService.MutateRows. +message MutateRowsResponse { + message Entry { + // The index into the original request's `entries` list of the Entry + // for which a result is being reported. + int64 index = 1; + + // The result of the request Entry identified by `index`. + // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. + google.rpc.Status status = 2; + } + + // One or more results for Entries from the batch request. + repeated Entry entries = 1; +} + +// Request message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowRequest { + // The unique name of the table to which the conditional mutation should be + // applied. + // Values are of the form + // projects//instances//tables/
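
MutateRowRequest and MutateRowsRequest above both carry a list of Mutation messages (defined in data.proto later in this patch) that are applied to one row in order. A hedged sketch of a single-cell write follows; the module names and resource names are assumptions for illustration only.

    from gcloud.bigtable._generated_v2 import bigtable_pb2, data_pb2

    mutation = data_pb2.Mutation(
        set_cell=data_pb2.Mutation.SetCell(
            family_name='stats',
            column_qualifier=b'clicks',
            timestamp_micros=-1,  # -1 asks the server to timestamp the cell
            value=b'42',
        ),
    )
    request = bigtable_pb2.MutateRowRequest(
        table_name='projects/my-project/instances/my-instance/tables/my-table',
        row_key=b'user#0001',
        mutations=[mutation],
    )
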
+ string table_name = 1; + + // The key of the row to which the conditional mutation should be applied. + bytes row_key = 2; + + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either "true_mutations" or + // "false_mutations" will be executed. If unset, checks that the row contains + // any values at all. + RowFilter predicate_filter = 6; + + // Changes to be atomically applied to the specified row if "predicate_filter" + // yields at least one cell when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "false_mutations" is empty, and at most + // 100000. + repeated Mutation true_mutations = 4; + + // Changes to be atomically applied to the specified row if "predicate_filter" + // does not yield any cells when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "true_mutations" is empty, and at most + // 100000. + repeated Mutation false_mutations = 5; +} + +// Response message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowResponse { + // Whether or not the request's "predicate_filter" yielded any results for + // the specified row. + bool predicate_matched = 1; +} + +// Request message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowRequest { + // The unique name of the table to which the read/modify/write rules should be + // applied. + // Values are of the form + // projects//instances//tables/
+ string table_name = 1; + + // The key of the row to which the read/modify/write rules should be applied. + bytes row_key = 2; + + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + repeated ReadModifyWriteRule rules = 3; +} + +// Response message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowResponse { + // A Row containing the new contents of all cells modified by the request. + Row row = 1; +} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto b/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto new file mode 100644 index 000000000000..c27d266a8e62 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto @@ -0,0 +1,213 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/common.proto"; +import "google/bigtable/admin/v2/instance.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableInstanceAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable Instances and +// Clusters. Provides access to the Instance and Cluster schemas only, not the +// tables metadata or data stored in those tables. +// +// Caution: This service is experimental. The details can change and the rpcs +// may or may not be active. +service BigtableInstanceAdmin { + // Create an instance within a project. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{name=projects/*}/instances" body: "*" }; + } + + // Gets information about an instance. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc GetInstance(GetInstanceRequest) returns (Instance) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" }; + } + + // Lists information about instances in a project. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { + option (google.api.http) = { get: "/v2/{name=projects/*}/instances" }; + } + + // Updates an instance within a project. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. 
+ rpc UpdateInstance(Instance) returns (Instance) { + option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" }; + } + + // Delete an instance from a project. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" }; + } + + // Creates a cluster within an instance. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*}/clusters" body: "cluster" }; + } + + // Gets information about a cluster. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" }; + } + + // Lists information about clusters in an instance. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}/clusters" }; + } + + // Updates a cluster within an instance. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { + option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" }; + } + + // Deletes a cluster from an instance. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" }; + } +} + +// Request message for BigtableInstanceAdmin.CreateInstance. +message CreateInstanceRequest { + string name = 1; + + string instance_id = 2; + + Instance instance = 3; + + map clusters = 4; +} + +// Request message for BigtableInstanceAdmin.GetInstance. +message GetInstanceRequest { + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListInstances. +message ListInstancesRequest { + string name = 1; + + // Locations from which Instance information could not be retrieved, + // due to an outage or some other transient condition. + // Instances whose Clusters are all in one of the failed locations + // may be missing from 'instances', and Instances with at least one + // Cluster in a failed location may only have partial information returned. + repeated string failed_locations = 2; + + string page_token = 3; +} + +// Response message for BigtableInstanceAdmin.ListInstances. +message ListInstancesResponse { + repeated Instance instances = 1; + + string next_page_token = 2; +} + +// Request message for BigtableInstanceAdmin.DeleteInstance. +message DeleteInstanceRequest { + string name = 1; +} + +// Request message for BigtableInstanceAdmin.CreateCluster. +message CreateClusterRequest { + string name = 1; + + string cluster_id = 2; + + Cluster cluster = 3; +} + +// Request message for BigtableInstanceAdmin.GetCluster. +message GetClusterRequest { + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListClusters. 
+message ListClustersRequest { + // Values are of the form projects//instances/ + // Use = '-' to list Clusters for all Instances in a project, + // for example "projects/myproject/instances/-" + string name = 1; + + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListClusters. +message ListClustersResponse { + repeated Cluster clusters = 1; + + // Locations from which Cluster information could not be retrieved, + // due to an outage or some other transient condition. + // Clusters from these locations may be missing from 'clusters', + // or may only have partial information returned. + repeated string failed_locations = 2; + + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteCluster. +message DeleteClusterRequest { + string name = 1; +} + +// The metadata for the Operation returned by CreateInstance. +message CreateInstanceMetadata { + // The request that prompted the initiation of this CreateInstance operation. + CreateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto b/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto new file mode 100644 index 000000000000..8654a2a92d57 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto @@ -0,0 +1,213 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/table.proto"; +import "google/protobuf/empty.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableTableAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// Provides access to the table schemas only, not the data stored within +// the tables. +// +// Caution: This service is experimental. The details can change and the rpcs +// may or may not be active. +service BigtableTableAdmin { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*}/tables" body: "*" }; + } + + // Lists all tables served from a specified instance. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. 
+ rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}/tables" }; + } + + // Gets metadata information about the specified table. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" }; + } + + // Permanently deletes a specified table and all of its data. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" }; + } + + // Atomically performs a series of column family modifications + // on the specified table. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" }; + } + + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. + // + // Caution: This rpc is experimental. The details can change and the rpc + // may or may not be active. + rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" }; + } +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +message CreateTableRequest { + // An initial split point for a newly created table. + message Split { + // Row key to use as an initial tablet boundary. + bytes key = 1; + } + + // The unique name of the instance in which to create the table. + // Values are of the form projects//instances/ + string name = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g. "foobar" rather than "/tables/foobar". + string table_id = 2; + + // The Table to create. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + repeated Split initial_splits = 4; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +message DropRowRangeRequest { + // The unique name of the table on which to drop a range of rows. + // Values are of the form projects//instances//tables/
+ string name = 1; + + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesRequest { + // The unique name of the instance for which tables should be listed. + // Values are of the form projects//instances/ + string name = 1; + + // The view to be applied to the returned tables' fields. + // Defaults to NAME_ONLY if unspecified (no others are currently supported). + Table.View view = 2; + + // Not yet supported. + string page_token = 3; +} + +// Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesResponse { + // The tables present in the requested cluster. + repeated Table tables = 1; + + string next_page_token = 2; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] +message GetTableRequest { + // The unique name of the requested table. + // Values are of the form projects//instances//tables/
+  string name = 1;
+
+  // The view to be applied to the returned table's fields.
+  // Defaults to SCHEMA_ONLY if unspecified.
+  Table.View view = 2;
+}
+
+// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
+message DeleteTableRequest {
+  // The unique name of the table to be deleted.
+  // Values are of the form projects/<project>/instances/<instance>/tables/<table>
+ string name = 1; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] +message ModifyColumnFamiliesRequest { + // A create, update, or delete of a particular column family. + message Modification { + // The ID of the column family to be modified. + string id = 1; + + oneof mod { + // Create a new column family with the specified schema, or fail if + // one already exists with the given ID. + ColumnFamily create = 2; + + // Update an existing column family to the specified schema, or fail + // if no column family exists with the given ID. + ColumnFamily update = 3; + + // Drop (delete) the column family with the given ID, or fail if no such + // family exists. + bool drop = 4; + } + } + + // The unique name of the table whose families should be modified. + // Values are of the form projects//instances//tables/
+ string name = 1; + + // Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + repeated Modification modifications = 2; +} diff --git a/gcloud/bigtable/_generated_v2/_common.proto b/gcloud/bigtable/_generated_v2/_common.proto new file mode 100644 index 000000000000..17cd4ffe19e8 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_common.proto @@ -0,0 +1,33 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +enum StorageType { + STORAGE_TYPE_UNSPECIFIED = 0; + + SSD = 1; + + HDD = 2; +} diff --git a/gcloud/bigtable/_generated_v2/_data.proto b/gcloud/bigtable/_generated_v2/_data.proto new file mode 100644 index 000000000000..aab5e9972603 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_data.proto @@ -0,0 +1,529 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +option java_multiple_files = true; +option java_outer_classname = "DataProto"; +option java_package = "com.google.bigtable.v2"; + + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +message Row { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + bytes key = 1; + + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. + repeated Family families = 2; +} + +// Specifies (some of) the contents of a single row/column family of a table. +message Family { + // The unique key which identifies this family within its row. This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. 
+ // Must be no greater than 64 characters in length. + string name = 1; + + // Must not be empty. Sorted in order of increasing "qualifier". + repeated Column columns = 2; +} + +// Specifies (some of) the contents of a single row/column of a table. +message Column { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its "column_qualifier_regex_filter" field. + // May contain any byte string, including the empty string, up to 16kiB in + // length. + bytes qualifier = 1; + + // Must not be empty. Sorted in order of decreasing "timestamp_micros". + repeated Cell cells = 2; +} + +// Specifies (some of) the contents of a single row/column/timestamp of a table. +message Cell { + // The cell's stored timestamp, which also uniquely identifies it within + // its column. + // Values are always expressed in microseconds, but individual tables may set + // a coarser "granularity" to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will only allow + // values of "timestamp_micros" which are multiples of 1000. + int64 timestamp_micros = 1; + + // The value stored in the cell. + // May contain any byte string, including the empty string, up to 100MiB in + // length. + bytes value = 2; + + // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. + repeated string labels = 3; +} + +// Specifies a contiguous range of rows. +message RowRange { + // The row key at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_key { + // Used when giving an inclusive lower bound for the range. + bytes start_key_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_key_open = 2; + } + + // The row key at which to end the range. + // If neither field is set, interpreted as the infinite row key, exclusive. + oneof end_key { + // Used when giving an inclusive upper bound for the range. + bytes end_key_open = 3; + + // Used when giving an exclusive upper bound for the range. + bytes end_key_closed = 4; + } +} + +// Specifies a non-contiguous set of rows. +message RowSet { + // Single rows included in the set. + repeated bytes row_keys = 1; + + // Contiguous row ranges included in the set. + repeated RowRange row_ranges = 2; +} + +// Specifies a contiguous range of columns within a single column family. +// The range spans from : to +// :, where both bounds can be either inclusive or +// exclusive. +message ColumnRange { + // The name of the column family within which this range falls. + string family_name = 1; + + // The column qualifier at which to start the range (within 'column_family'). + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_qualifier { + // Used when giving an inclusive lower bound for the range. + bytes start_qualifier_closed = 2; + + // Used when giving an exclusive lower bound for the range. + bytes start_qualifier_open = 3; + } + + // The column qualifier at which to end the range (within 'column_family'). + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_qualifier { + // Used when giving an inclusive upper bound for the range. + bytes end_qualifier_closed = 4; + + // Used when giving an exclusive upper bound for the range. + bytes end_qualifier_open = 5; + } +} + +// Specified a contiguous range of microsecond timestamps. 
+message TimestampRange { + // Inclusive lower bound. If left empty, interpreted as 0. + int64 start_timestamp_micros = 1; + + // Exclusive upper bound. If left empty, interpreted as infinity. + int64 end_timestamp_micros = 2; +} + +// Specifies a contiguous range of raw byte values. +message ValueRange { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_value { + // Used when giving an inclusive lower bound for the range. + bytes start_value_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_value_open = 2; + } + + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_value { + // Used when giving an inclusive upper bound for the range. + bytes end_value_closed = 3; + + // Used when giving an exclusive upper bound for the range. + bytes end_value_open = 4; + } +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the "value_regex_filter", +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that RE2(.) is equivalent by default to +// RE2([^\n]), meaning that it does not match newlines. When attempting to match +// an arbitrary byte, you should therefore use the escape sequence '\C', which +// may need to be further escaped as '\\C' in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the "strip_value_transformer", which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +message RowFilter { + // A RowFilter which sends rows through several RowFilters in sequence. + message Chain { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which sends each row to each of several component + // RowFilters and interleaves the results. 
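
The Chain message above composes several RowFilters that run in sequence over each row. As a hedged illustration (generated module name assumed, and the filter fields used here come from the oneof listed further down in this message), a chain that keeps only the two most recent cells per column in family "stats" and strips their values could be built like this:

    from gcloud.bigtable._generated_v2 import data_pb2

    row_filter = data_pb2.RowFilter(
        chain=data_pb2.RowFilter.Chain(filters=[
            data_pb2.RowFilter(family_name_regex_filter='stats'),
            data_pb2.RowFilter(cells_per_column_limit_filter=2),
            data_pb2.RowFilter(strip_value_transformer=True),
        ]),
    )
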
+ message Interleave { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // All interleaved filters are executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which evaluates one of two possible RowFilters, depending on + // whether or not a predicate RowFilter outputs any cells from the input row. + // + // IMPORTANT NOTE: The predicate filter does not execute atomically with the + // true and false filters, which may lead to inconsistent or unexpected + // results. Additionally, Condition filters have poor performance, especially + // when filters are set for the false condition. + message Condition { + // If "predicate_filter" outputs any cells, then "true_filter" will be + // evaluated on the input row. Otherwise, "false_filter" will be evaluated. + RowFilter predicate_filter = 1; + + // The filter to apply to the input row if "predicate_filter" returns any + // results. If not provided, no results will be returned in the true case. + RowFilter true_filter = 2; + + // The filter to apply to the input row if "predicate_filter" does not + // return any results. If not provided, no results will be returned in the + // false case. + RowFilter false_filter = 3; + } + + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + oneof filter { + // Applies several RowFilters to the data in sequence, progressively + // narrowing the results. + Chain chain = 1; + + // Applies several RowFilters to the data in parallel and combines the + // results. + Interleave interleave = 2; + + // Applies one of two possible RowFilters to the data based on the output of + // a predicate RowFilter. + Condition condition = 3; + + // ADVANCED USE ONLY. + // Hook for introspection into the RowFilter. Outputs all cells directly to + // the output of the read rather than to any parent filter. 
Consider the + // following example: + // + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) + // + // A,A,1,w + // A,B,2,x + // B,B,4,z + // | + // FamilyRegex("A") + // | + // A,A,1,w + // A,B,2,x + // | + // +------------+-------------+ + // | | + // All() Label(foo) + // | | + // A,A,1,w A,A,1,w,labels:[foo] + // A,B,2,x A,B,2,x,labels:[foo] + // | | + // | Sink() --------------+ + // | | | + // +------------+ x------+ A,A,1,w,labels:[foo] + // | A,B,2,x,labels:[foo] + // A,A,1,w | + // A,B,2,x | + // | | + // QualifierRegex("B") | + // | | + // A,B,2,x | + // | | + // +--------------------------------+ + // | + // A,A,1,w,labels:[foo] + // A,B,2,x,labels:[foo] // could be switched + // A,B,2,x // could be switched + // + // Despite being excluded by the qualifier filter, a copy of every cell + // that reaches the sink is present in the final result. + // + // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], + // duplicate cells are possible, and appear in an unspecified mutual order. + // In this case we have a duplicate with column "A:B" and timestamp 2, + // because one copy passed through the all filter while the other was + // passed through the label and sink. Note that one copy has label "foo", + // while the other does not. + // + // Cannot be used within the `predicate_filter`, `true_filter`, or + // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. + bool sink = 16; + + // Matches all cells, regardless of input. Functionally equivalent to + // leaving `filter` unset, but included for completeness. + bool pass_all_filter = 17; + + // Does not match any cells, regardless of input. Useful for temporarily + // disabling just part of a filter. + bool block_all_filter = 18; + + // Matches only cells from rows whose keys satisfy the given RE2 regex. In + // other words, passes through the entire row when the key matches, and + // otherwise produces an empty row. + // Note that, since row keys can contain arbitrary bytes, the '\C' escape + // sequence must be used if a true wildcard is desired. The '.' character + // will not match the new line character '\n', which may be present in a + // binary key. + bytes row_key_regex_filter = 4; + + // Matches all cells from a row with probability p, and matches no cells + // from the row with probability 1-p. + double row_sample_filter = 14; + + // Matches only cells from columns whose families satisfy the given RE2 + // regex. For technical reasons, the regex must not contain the ':' + // character, even if it is not being used as a literal. + // Note that, since column families cannot contain the new line character + // '\n', it is sufficient to use '.' as a full wildcard when matching + // column family names. + string family_name_regex_filter = 5; + + // Matches only cells from columns whose qualifiers satisfy the given RE2 + // regex. + // Note that, since column qualifiers can contain arbitrary bytes, the '\C' + // escape sequence must be used if a true wildcard is desired. The '.' + // character will not match the new line character '\n', which may be + // present in a binary qualifier. + bytes column_qualifier_regex_filter = 6; + + // Matches only cells from columns within the given range. + ColumnRange column_range_filter = 7; + + // Matches only cells with timestamps within the given range. 
+ TimestampRange timestamp_range_filter = 8; + + // Matches only cells with values that satisfy the given regular expression. + // Note that, since cell values can contain arbitrary bytes, the '\C' escape + // sequence must be used if a true wildcard is desired. The '.' character + // will not match the new line character '\n', which may be present in a + // binary value. + bytes value_regex_filter = 9; + + // Matches only cells with values that fall within the given range. + ValueRange value_range_filter = 15; + + // Skips the first N cells of each row, matching all subsequent cells. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_offset_filter = 10; + + // Matches only the first N cells of each row. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_limit_filter = 11; + + // Matches only the most recent N cells within each column. For example, + // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, + // skip all earlier cells in "foo:bar", and then begin matching again in + // column "foo:bar2". + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_column_limit_filter = 12; + + // Replaces each cell's value with the empty string. + bool strip_value_transformer = 13; + + // Applies the given label to all cells in the output row. This allows + // the client to determine which results were produced from which part of + // the filter. + // + // Values must be at most 15 characters in length, and match the RE2 + // pattern [a-z0-9\\-]+ + // + // Due to a technical limitation, it is not currently possible to apply + // multiple labels to a cell. As a result, a Chain may have no more than + // one sub-filter which contains a apply_label_transformer. It is okay for + // an Interleave to contain multiple apply_label_transformers, as they will + // be applied to separate copies of the input. This may be relaxed in the + // future. + string apply_label_transformer = 19; + } +} + +// Specifies a particular change to be made to the contents of a row. +message Mutation { + // A Mutation which sets the value of the specified cell. + message SetCell { + // The name of the family into which new data should be written. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the "granularity" of the table (e.g. micros, millis). + int64 timestamp_micros = 3; + + // The value to be written into the specified cell. + bytes value = 4; + } + + // A Mutation which deletes cells from the specified column, optionally + // restricting the deletions to a given timestamp range. + message DeleteFromColumn { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. 
+ bytes column_qualifier = 2; + + // The range of timestamps within which cells should be deleted. + TimestampRange time_range = 3; + } + + // A Mutation which deletes all cells from the specified column family. + message DeleteFromFamily { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + } + + // A Mutation which deletes all cells from the containing row. + message DeleteFromRow { + + } + + // Which of the possible Mutation types to apply. + oneof mutation { + // Set a cell's value. + SetCell set_cell = 1; + + // Deletes cells from a column. + DeleteFromColumn delete_from_column = 2; + + // Deletes cells from a column family. + DeleteFromFamily delete_from_family = 3; + + // Deletes cells from the entire row. + DeleteFromRow delete_from_row = 4; + } +} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +message ReadModifyWriteRule { + // The name of the family to which the read/modify/write should be applied. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The rule used to determine the column's new latest value from its current + // latest value. + oneof rule { + // Rule specifying that "append_value" be appended to the existing value. + // If the targeted cell is unset, it will be treated as containing the + // empty string. + bytes append_value = 3; + + // Rule specifying that "increment_amount" be added to the existing value. + // If the targeted cell is unset, it will be treated as containing a zero. + // Otherwise, the targeted cell must contain an 8-byte value (interpreted + // as a 64-bit big-endian signed integer), or the entire request will fail. + int64 increment_amount = 4; + } +} diff --git a/gcloud/bigtable/_generated_v2/_instance.proto b/gcloud/bigtable/_generated_v2/_instance.proto new file mode 100644 index 000000000000..eb324ea9a02c --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_instance.proto @@ -0,0 +1,78 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/common.proto"; + +option java_multiple_files = true; +option java_outer_classname = "InstanceProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +message Instance { + enum State { + STATE_NOT_KNOWN = 0; + + READY = 1; + + // The instance is currently being created, and may be destroyed + // if the creation process encounters an error. 
+ CREATING = 2; + } + + // @OutputOnly + string name = 1; + + string display_name = 2; + + // @OutputOnly + State state = 3; +} + +message Cluster { + enum State { + STATE_NOT_KNOWN = 0; + + READY = 1; + + // The cluster is currently being created, and may be destroyed + // if the creation process encounters an error. + CREATING = 2; + + RESIZING = 3; + + // The cluster has no backing nodes. The data (tables) still + // exist, but no operations can be performed on the cluster. + DISABLED = 4; + } + + // @OutputOnly + string name = 1; + + // @CreationOnly + // Currently only zones are supported, e.g. projects/*/locations/us-central1-b + string location = 2; + + // @OutputOnly + State state = 3; + + int32 serve_nodes = 4; + + // @CreationOnly + StorageType default_storage_type = 5; +} diff --git a/gcloud/bigtable/_generated_v2/_operations.proto b/gcloud/bigtable/_generated_v2/_operations.proto new file mode 100644 index 000000000000..a358d0a38787 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_operations.proto @@ -0,0 +1,144 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.longrunning; + +import "google/api/annotations.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/rpc/status.proto"; + +option java_multiple_files = true; +option java_outer_classname = "OperationsProto"; +option java_package = "com.google.longrunning"; + + +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return [Operation][google.longrunning.Operation] to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or using `google.watcher.v1.Watcher` interface to watch +// the response, or pass the operation resource to another API (such as Google +// Cloud Pub/Sub API) to receive the response. Any API service that returns +// long-running operations should implement the `Operations` interface so +// developers can have a consistent client experience. +service Operations { + // Gets the latest state of a long-running operation. Clients may use this + // method to poll the operation result at intervals as recommended by the API + // service. + rpc GetOperation(GetOperationRequest) returns (Operation) { + option (google.api.http) = { get: "/v1/{name=operations/**}" }; + } + + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { + option (google.api.http) = { get: "/v1/{name=operations}" }; + } + + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. 
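
The Instance and Cluster messages above are what CreateInstanceRequest (earlier in this patch) carries; its clusters field is a map from cluster ID to Cluster. A sketch of assembling that request with the generated modules this patch adds is below; the resource names are placeholders, not values from this change.

    from gcloud.bigtable._generated_v2 import bigtable_instance_admin_pb2
    from gcloud.bigtable._generated_v2 import common_pb2, instance_pb2

    cluster = instance_pb2.Cluster(
        location='projects/my-project/locations/us-central1-b',
        serve_nodes=3,
        default_storage_type=common_pb2.SSD,
    )
    request = bigtable_instance_admin_pb2.CreateInstanceRequest(
        name='projects/my-project',
        instance_id='my-instance',
        instance=instance_pb2.Instance(display_name='My Instance'),
    )
    # Message-valued map entries are filled in via CopyFrom, not assignment.
    request.clusters['my-cluster'].CopyFrom(cluster)
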
If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients may use + // [Operations.GetOperation] or other methods to check whether the + // cancellation succeeded or the operation completed despite cancellation. + rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; + } + + // Deletes a long-running operation. It indicates the client is no longer + // interested in the operation result. It does not cancel the operation. + rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=operations/**}" }; + } +} + +// This resource represents a long-running operation that is the result of a +// network API call. +message Operation { + // The name of the operation resource, which is only unique within the same + // service that originally returns it. + string name = 1; + + // Some service-specific metadata associated with the operation. It typically + // contains progress information and common metadata such as create time. + // Some services may not provide such metadata. Any method that returns a + // long-running operation should document the metadata type, if any. + google.protobuf.Any metadata = 2; + + // If the value is false, it means the operation is still in progress. + // If true, the operation is completed and the `result` is available. + bool done = 3; + + oneof result { + // The error result of the operation in case of failure. + google.rpc.Status error = 4; + + // The normal response of the operation in case of success. If the original + // method returns no data on success, such as `Delete`, the response will be + // `google.protobuf.Empty`. If the original method is standard + // `Get`/`Create`/`Update`, the response should be the resource. For other + // methods, the response should have the type `XxxResponse`, where `Xxx` + // is the original method name. For example, if the original method name + // is `TakeSnapshot()`, the inferred response type will be + // `TakeSnapshotResponse`. + google.protobuf.Any response = 5; + } +} + +// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +message GetOperationRequest { + // The name of the operation resource. + string name = 1; +} + +// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +message ListOperationsRequest { + // The name of the operation collection. + string name = 4; + + // The standard List filter. + string filter = 1; + + // The standard List page size. + int32 page_size = 2; + + // The standard List page token. + string page_token = 3; +} + +// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +message ListOperationsResponse { + // A list of operations that match the specified filter in the request. + repeated Operation operations = 1; + + // The standard List next-page token. + string next_page_token = 2; +} + +// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +message CancelOperationRequest { + // The name of the operation resource to be cancelled. + string name = 1; +} + +// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +message DeleteOperationRequest { + // The name of the operation resource to be deleted. 
+ string name = 1; +} diff --git a/gcloud/bigtable/_generated_v2/_table.proto b/gcloud/bigtable/_generated_v2/_table.proto new file mode 100644 index 000000000000..f5516aaf1bf5 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_table.proto @@ -0,0 +1,155 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/protobuf/duration.proto"; + +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +message Table { + // The state of a table's data in a particular cluster. + message ClusterState { + enum ReplicationState { + // The replication state of the table is unknown in this cluster. + STATE_NOT_KNOWN = 0; + + // The cluster was recently created, and the table must finish copying + // over pre-existing data from other clusters before it can begin + // receiving live replication updates and serving + // [Data API][google.bigtable.v2.BigtableService] requests. + INITIALIZING = 1; + + // The table is temporarily unable to serve + // [Data API][google.bigtable.v2.BigtableService] requests from this + // cluster due to planned internal maintenance. + PLANNED_MAINTENANCE = 2; + + // The table is temporarily unable to serve + // [Data API][google.bigtable.v2.BigtableService] requests from this + // cluster due to unplanned or emergency maintenance. + UNPLANNED_MAINTENANCE = 3; + + // The table can serve + // [Data API][google.bigtable.v2.BigtableService] requests from this + // cluster. Depending on replication delay, reads may not immediately + // reflect the state of the table in other clusters. + READY = 4; + } + + // The state of replication for the table in this cluster. + // @OutputOnly + ReplicationState replication_state = 1; + } + + enum TimestampGranularity { + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; + + MILLIS = 1; + } + + // Defines a view over a table's fields. + enum View { + // Uses the default view for each method as documented in its request. + VIEW_UNSPECIFIED = 0; + + // Only populates `name`. + NAME_ONLY = 1; + + // Only populates `name` and fields related to the table's schema. + SCHEMA_VIEW = 2; + + // Only populates `name` and fields related to the table's + // replication state. + REPLICATION_VIEW = 3; + + // Populates all fields. + FULL = 4; + } + + // The unique name of the table. Values are of the form + // projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + // Views: NAME_ONLY, SCHEMA_VIEW, REPLICATION_VIEW, FULL + // @OutputOnly + string name = 1; + + // Map from cluster ID to per-cluster table state. + // If it could not be determined whether or not the table has data in a + // particular cluster (for example, if its zone is unavailable), then + // there will be an entry for the cluster with UNKNOWN `replication_status`. 
+ // Views: REPLICATION_VIEW, FULL + // @OutputOnly + map cluster_states = 2; + + // The column families configured for this table, mapped by column family ID. + // Views: SCHEMA_VIEW, FULL + // @CreationOnly + map column_families = 3; + + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // If unspecified at creation time, the value will be set to MILLIS. + // Views: SCHEMA_VIEW, FULL + // @CreationOnly + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // Garbage collection rule specified as a protobuf. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 1; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. + repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. + Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} diff --git a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py new file mode 100644 index 000000000000..344918dc1c44 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py @@ -0,0 +1,616 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
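
The GcRule message defined just above (in _table.proto, before this generated module begins) expresses per-column-family garbage collection. As a hedged sketch with an assumed table_pb2 module name: an Intersection of max_num_versions and max_age deletes a cell only when it is both past the newest three versions and older than seven days, so the newest three versions, and anything younger than a week, are always kept.

    from google.protobuf import duration_pb2
    from gcloud.bigtable._generated_v2 import table_pb2

    seven_days = duration_pb2.Duration(seconds=7 * 24 * 60 * 60)
    gc_rule = table_pb2.GcRule(
        intersection=table_pb2.GcRule.Intersection(rules=[
            table_pb2.GcRule(max_num_versions=3),
            table_pb2.GcRule(max_age=seven_days),
        ]),
    )
    column_family = table_pb2.ColumnFamily(gc_rule=gc_rule)
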
+# source: google/bigtable/admin/v2/bigtable_instance_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 +from gcloud.bigtable._generated_v2 import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/bigtable_instance_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n6google/bigtable/admin/v2/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\x1a\'google/bigtable/admin/v2/instance.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x95\x02\n\x15\x43reateInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"R\n\x14ListInstancesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"l\n\x14\x43reateClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"7\n\x13ListClustersRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xd3\x0b\n\x15\x42igtableInstanceAdmin\x12\x8c\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\"*\x82\xd3\xe4\x93\x02$\"\x1f/v2/{name=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x99\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v2/{name=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\";\x82\xd3\xe4\x93\x02\x35\"*/v2/{name=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa1\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}B<\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( + name='ClustersEntry', + full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=489, + serialized_end=571, +) + +_CREATEINSTANCEREQUEST = _descriptor.Descriptor( + name='CreateInstanceRequest', + full_name='google.bigtable.admin.v2.CreateInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.CreateInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instance_id', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instance', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.CreateInstanceRequest.clusters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=294, + serialized_end=571, +) + + +_GETINSTANCEREQUEST = _descriptor.Descriptor( + name='GetInstanceRequest', + full_name='google.bigtable.admin.v2.GetInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=573, + serialized_end=607, +) + + +_LISTINSTANCESREQUEST = _descriptor.Descriptor( + name='ListInstancesRequest', + full_name='google.bigtable.admin.v2.ListInstancesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ListInstancesRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesRequest.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=609, + serialized_end=691, +) + + +_LISTINSTANCESRESPONSE = _descriptor.Descriptor( + name='ListInstancesResponse', + full_name='google.bigtable.admin.v2.ListInstancesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='instances', full_name='google.bigtable.admin.v2.ListInstancesResponse.instances', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=693, + serialized_end=796, +) + + +_DELETEINSTANCEREQUEST = _descriptor.Descriptor( + name='DeleteInstanceRequest', + full_name='google.bigtable.admin.v2.DeleteInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=798, + serialized_end=835, +) + + +_CREATECLUSTERREQUEST = _descriptor.Descriptor( + name='CreateClusterRequest', + full_name='google.bigtable.admin.v2.CreateClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.CreateClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster_id', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster', 
full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=837, + serialized_end=945, +) + + +_GETCLUSTERREQUEST = _descriptor.Descriptor( + name='GetClusterRequest', + full_name='google.bigtable.admin.v2.GetClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=947, + serialized_end=980, +) + + +_LISTCLUSTERSREQUEST = _descriptor.Descriptor( + name='ListClustersRequest', + full_name='google.bigtable.admin.v2.ListClustersRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ListClustersRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListClustersRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=982, + serialized_end=1037, +) + + +_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( + name='ListClustersResponse', + full_name='google.bigtable.admin.v2.ListClustersResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.ListClustersResponse.clusters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListClustersResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListClustersResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1039, + serialized_end=1165, +) + + +_DELETECLUSTERREQUEST = _descriptor.Descriptor( + name='DeleteClusterRequest', + full_name='google.bigtable.admin.v2.DeleteClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1167, + serialized_end=1203, +) + + +_CREATEINSTANCEMETADATA = _descriptor.Descriptor( + name='CreateInstanceMetadata', + full_name='google.bigtable.admin.v2.CreateInstanceMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1206, + serialized_end=1404, +) + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE +_CREATEINSTANCEREQUEST.fields_by_name['clusters'].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY +_LISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE +_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST 
+_CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesResponse'] = _LISTINSTANCESRESPONSE +DESCRIPTOR.message_types_by_name['DeleteInstanceRequest'] = _DELETEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST +DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST +DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE +DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA + +CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( + + ClustersEntry = _reflection.GeneratedProtocolMessageType('ClustersEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) + )) + , + DESCRIPTOR = _CREATEINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) + )) +_sym_db.RegisterMessage(CreateInstanceRequest) +_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) + +GetInstanceRequest = _reflection.GeneratedProtocolMessageType('GetInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _GETINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) + )) +_sym_db.RegisterMessage(GetInstanceRequest) + +ListInstancesRequest = _reflection.GeneratedProtocolMessageType('ListInstancesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) + )) +_sym_db.RegisterMessage(ListInstancesRequest) + +ListInstancesResponse = _reflection.GeneratedProtocolMessageType('ListInstancesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) + )) +_sym_db.RegisterMessage(ListInstancesResponse) + +DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('DeleteInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETEINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) + )) +_sym_db.RegisterMessage(DeleteInstanceRequest) + +CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _CREATECLUSTERREQUEST, + __module__ = 
'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) + )) +_sym_db.RegisterMessage(CreateClusterRequest) + +GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _GETCLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) + )) +_sym_db.RegisterMessage(GetClusterRequest) + +ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) + )) +_sym_db.RegisterMessage(ListClustersRequest) + +ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) + )) +_sym_db.RegisterMessage(ListClustersResponse) + +DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETECLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) + )) +_sym_db.RegisterMessage(DeleteClusterRequest) + +CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType('CreateInstanceMetadata', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEMETADATA, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) + )) +_sym_db.RegisterMessage(CreateInstanceMetadata) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_pb2.py new file mode 100644 index 000000000000..5c9e39dc4e89 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/bigtable_pb2.py @@ -0,0 +1,807 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/bigtable.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/v2/bigtable.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x92\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 
\x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B)\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v2_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_READROWSREQUEST = _descriptor.Descriptor( + name='ReadRowsRequest', + full_name='google.bigtable.v2.ReadRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=176, + serialized_end=322, +) + + +_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( + name='CellChunk', + full_name='google.bigtable.v2.ReadRowsResponse.CellChunk', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='row_status', 
full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_status', + index=0, containing_type=None, fields=[]), + ], + serialized_start=440, + serialized_end=701, +) + +_READROWSRESPONSE = _descriptor.Descriptor( + name='ReadRowsResponse', + full_name='google.bigtable.v2.ReadRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chunks', full_name='google.bigtable.v2.ReadRowsResponse.chunks', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_READROWSRESPONSE_CELLCHUNK, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=325, + serialized_end=701, +) + + +_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( + name='SampleRowKeysRequest', + full_name='google.bigtable.v2.SampleRowKeysRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.SampleRowKeysRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=703, + serialized_end=745, +) + + +_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( + name='SampleRowKeysResponse', + full_name='google.bigtable.v2.SampleRowKeysResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.SampleRowKeysResponse.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=747, + serialized_end=809, +) + + +_MUTATEROWREQUEST = _descriptor.Descriptor( + name='MutateRowRequest', + full_name='google.bigtable.v2.MutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=811, + serialized_end=915, +) + + +_MUTATEROWRESPONSE = _descriptor.Descriptor( + name='MutateRowResponse', + full_name='google.bigtable.v2.MutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=917, + serialized_end=936, +) + + +_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsRequest.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowsRequest.Entry.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1042, + serialized_end=1115, +) + +_MUTATEROWSREQUEST = _descriptor.Descriptor( + name='MutateRowsRequest', + full_name='google.bigtable.v2.MutateRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsRequest.entries', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSREQUEST_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], 
+ oneofs=[ + ], + serialized_start=939, + serialized_end=1115, +) + + +_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsResponse.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='google.bigtable.v2.MutateRowsResponse.Entry.index', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1203, + serialized_end=1261, +) + +_MUTATEROWSRESPONSE = _descriptor.Descriptor( + name='MutateRowsResponse', + full_name='google.bigtable.v2.MutateRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsResponse.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSRESPONSE_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1118, + serialized_end=1261, +) + + +_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( + name='CheckAndMutateRowRequest', + full_name='google.bigtable.v2.CheckAndMutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.CheckAndMutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=2, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1264, + serialized_end=1493, +) + + +_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( + name='CheckAndMutateRowResponse', + full_name='google.bigtable.v2.CheckAndMutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_matched', full_name='google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1495, + serialized_end=1549, +) + + +_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( + name='ReadModifyWriteRowRequest', + full_name='google.bigtable.v2.ReadModifyWriteRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1551, + serialized_end=1671, +) + + +_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( + name='ReadModifyWriteRowResponse', + full_name='google.bigtable.v2.ReadModifyWriteRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row', full_name='google.bigtable.v2.ReadModifyWriteRowResponse.row', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1673, + serialized_end=1739, +) + 
+_READROWSREQUEST.fields_by_name['rows'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWSET +_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER +_READROWSRESPONSE_CELLCHUNK.fields_by_name['family_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE +_READROWSRESPONSE_CELLCHUNK.fields_by_name['qualifier'].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE +_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CELLCHUNK +_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST +_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY +_MUTATEROWSRESPONSE_ENTRY.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS +_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE +_MUTATEROWSRESPONSE.fields_by_name['entries'].message_type = _MUTATEROWSRESPONSE_ENTRY +_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER +_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._READMODIFYWRITERULE +_READMODIFYWRITEROWRESPONSE.fields_by_name['row'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROW +DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST +DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE +DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST +DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['MutateRowResponse'] = _MUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST +DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE +DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE + +ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( + DESCRIPTOR = _READROWSREQUEST, + __module__ = 
'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) + )) +_sym_db.RegisterMessage(ReadRowsRequest) + +ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( + + CellChunk = _reflection.GeneratedProtocolMessageType('CellChunk', (_message.Message,), dict( + DESCRIPTOR = _READROWSRESPONSE_CELLCHUNK, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) + )) + , + DESCRIPTOR = _READROWSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) + )) +_sym_db.RegisterMessage(ReadRowsResponse) +_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) + +SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) + )) +_sym_db.RegisterMessage(SampleRowKeysRequest) + +SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) + )) +_sym_db.RegisterMessage(SampleRowKeysResponse) + +MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) + )) +_sym_db.RegisterMessage(MutateRowRequest) + +MutateRowResponse = _reflection.GeneratedProtocolMessageType('MutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) + )) +_sym_db.RegisterMessage(MutateRowResponse) + +MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) + )) +_sym_db.RegisterMessage(MutateRowsRequest) +_sym_db.RegisterMessage(MutateRowsRequest.Entry) + +MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSRESPONSE_ENTRY, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) + )) +_sym_db.RegisterMessage(MutateRowsResponse) +_sym_db.RegisterMessage(MutateRowsResponse.Entry) + +CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', 
(_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) + )) +_sym_db.RegisterMessage(CheckAndMutateRowRequest) + +CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) + )) +_sym_db.RegisterMessage(CheckAndMutateRowResponse) + +ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowRequest) + +ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowResponse', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowResponse) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py new file mode 100644 index 000000000000..bd695d5f6b3e --- /dev/null +++ b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py @@ -0,0 +1,507 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/admin/v2/bigtable_table_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/bigtable_table_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc6\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"i\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 
\x01(\x08H\x00\x42\x05\n\x03mod2\xb4\x07\n\x12\x42igtableTableAdmin\x12\x91\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"3\x82\xd3\xe4\x93\x02-\"(/v2/{name=projects/*/instances/*}/tables:\x01*\x12\x99\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"0\x82\xd3\xe4\x93\x02*\x12(/v2/{name=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( + name='Split', + full_name='google.bigtable.admin.v2.CreateTableRequest.Split', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=357, + serialized_end=377, +) + +_CREATETABLEREQUEST = _descriptor.Descriptor( + name='CreateTableRequest', + full_name='google.bigtable.admin.v2.CreateTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.CreateTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), 
+ _descriptor.FieldDescriptor( + name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CREATETABLEREQUEST_SPLIT, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=179, + serialized_end=377, +) + + +_DROPROWRANGEREQUEST = _descriptor.Descriptor( + name='DropRowRangeRequest', + full_name='google.bigtable.admin.v2.DropRowRangeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', + index=0, containing_type=None, fields=[]), + ], + serialized_start=379, + serialized_end=488, +) + + +_LISTTABLESREQUEST = _descriptor.Descriptor( + name='ListTablesRequest', + full_name='google.bigtable.admin.v2.ListTablesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ListTablesRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, 
+ syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=490, + serialized_end=595, +) + + +_LISTTABLESRESPONSE = _descriptor.Descriptor( + name='ListTablesResponse', + full_name='google.bigtable.admin.v2.ListTablesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=597, + serialized_end=691, +) + + +_GETTABLEREQUEST = _descriptor.Descriptor( + name='GetTableRequest', + full_name='google.bigtable.admin.v2.GetTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=693, + serialized_end=776, +) + + +_DELETETABLEREQUEST = _descriptor.Descriptor( + name='DeleteTableRequest', + full_name='google.bigtable.admin.v2.DeleteTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=778, + serialized_end=812, +) + + +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( + name='Modification', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', + index=0, containing_type=None, fields=[]), + ], + serialized_start=952, + serialized_end=1117, +) + +_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( + name='ModifyColumnFamiliesRequest', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=815, + serialized_end=1117, +) + +_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST +_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE +_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + _DROPROWRANGEREQUEST.fields_by_name['row_key_prefix']) +_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + _DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table']) +_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] 
+_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW +_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE +_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST + +CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( + + Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict( + DESCRIPTOR = _CREATETABLEREQUEST_SPLIT, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + )) + , + DESCRIPTOR = _CREATETABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + )) +_sym_db.RegisterMessage(CreateTableRequest) +_sym_db.RegisterMessage(CreateTableRequest.Split) + +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', (_message.Message,), dict( + DESCRIPTOR = _DROPROWRANGEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) + )) +_sym_db.RegisterMessage(DropRowRangeRequest) + +ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESREQUEST, + __module__ = 
'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) + )) +_sym_db.RegisterMessage(ListTablesRequest) + +ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) + )) +_sym_db.RegisterMessage(ListTablesResponse) + +GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( + DESCRIPTOR = _GETTABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) + )) +_sym_db.RegisterMessage(GetTableRequest) + +DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETETABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) + )) +_sym_db.RegisterMessage(DeleteTableRequest) + +ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict( + + Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict( + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) + )) + , + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) + )) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/common_pb2.py b/gcloud/bigtable/_generated_v2/common_pb2.py new file mode 100644 index 000000000000..298130452971 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/common_pb2.py @@ -0,0 +1,67 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/admin/v2/common.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/common.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n%google/bigtable/admin/v2/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42-\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_STORAGETYPE = _descriptor.EnumDescriptor( + name='StorageType', + full_name='google.bigtable.admin.v2.StorageType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STORAGE_TYPE_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SSD', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HDD', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=130, + serialized_end=191, +) +_sym_db.RegisterEnumDescriptor(_STORAGETYPE) + +StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) +STORAGE_TYPE_UNSPECIFIED = 0 +SSD = 1 +HDD = 2 + + +DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/data_pb2.py b/gcloud/bigtable/_generated_v2/data_pb2.py new file mode 100644 index 000000000000..6db08fbd12c3 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/data_pb2.py @@ -0,0 +1,1260 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/data.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/v2/data.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b 
\x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB%\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_ROW = _descriptor.Descriptor( + name='Row', + full_name='google.bigtable.v2.Row', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.v2.Row.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='families', full_name='google.bigtable.v2.Row.families', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=53, + serialized_end=117, +) + + +_FAMILY = _descriptor.Descriptor( + name='Family', + full_name='google.bigtable.v2.Family', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.v2.Family.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='columns', 
full_name='google.bigtable.v2.Family.columns', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=119, + serialized_end=186, +) + + +_COLUMN = _descriptor.Descriptor( + name='Column', + full_name='google.bigtable.v2.Column', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.Column.qualifier', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells', full_name='google.bigtable.v2.Column.cells', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=188, + serialized_end=256, +) + + +_CELL = _descriptor.Descriptor( + name='Cell', + full_name='google.bigtable.v2.Cell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.Cell.timestamp_micros', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.Cell.value', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.Cell.labels', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=258, + serialized_end=321, +) + + +_ROWRANGE = _descriptor.Descriptor( + name='RowRange', + full_name='google.bigtable.v2.RowRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_key_closed', full_name='google.bigtable.v2.RowRange.start_key_closed', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_key_open', full_name='google.bigtable.v2.RowRange.start_key_open', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key_open', full_name='google.bigtable.v2.RowRange.end_key_open', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key_closed', full_name='google.bigtable.v2.RowRange.end_key_closed', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_key', full_name='google.bigtable.v2.RowRange.start_key', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_key', full_name='google.bigtable.v2.RowRange.end_key', + index=1, containing_type=None, fields=[]), + ], + serialized_start=324, + serialized_end=462, +) + + +_ROWSET = _descriptor.Descriptor( + name='RowSet', + full_name='google.bigtable.v2.RowSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_keys', full_name='google.bigtable.v2.RowSet.row_keys', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_ranges', full_name='google.bigtable.v2.RowSet.row_ranges', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=464, + serialized_end=540, +) + + +_COLUMNRANGE = _descriptor.Descriptor( + name='ColumnRange', + full_name='google.bigtable.v2.ColumnRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ColumnRange.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.start_qualifier_closed', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_qualifier_open', full_name='google.bigtable.v2.ColumnRange.start_qualifier_open', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_qualifier_closed', 
full_name='google.bigtable.v2.ColumnRange.end_qualifier_closed', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_qualifier_open', full_name='google.bigtable.v2.ColumnRange.end_qualifier_open', index=4, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_qualifier', full_name='google.bigtable.v2.ColumnRange.start_qualifier', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_qualifier', full_name='google.bigtable.v2.ColumnRange.end_qualifier', + index=1, containing_type=None, fields=[]), + ], + serialized_start=543, + serialized_end=741, +) + + +_TIMESTAMPRANGE = _descriptor.Descriptor( + name='TimestampRange', + full_name='google.bigtable.v2.TimestampRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.start_timestamp_micros', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.end_timestamp_micros', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=743, + serialized_end=821, +) + + +_VALUERANGE = _descriptor.Descriptor( + name='ValueRange', + full_name='google.bigtable.v2.ValueRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_value_closed', full_name='google.bigtable.v2.ValueRange.start_value_closed', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_value_open', full_name='google.bigtable.v2.ValueRange.start_value_open', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_value_closed', full_name='google.bigtable.v2.ValueRange.end_value_closed', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_value_open', 
full_name='google.bigtable.v2.ValueRange.end_value_open', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_value', full_name='google.bigtable.v2.ValueRange.start_value', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_value', full_name='google.bigtable.v2.ValueRange.end_value', + index=1, containing_type=None, fields=[]), + ], + serialized_start=824, + serialized_end=976, +) + + +_ROWFILTER_CHAIN = _descriptor.Descriptor( + name='Chain', + full_name='google.bigtable.v2.RowFilter.Chain', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='filters', full_name='google.bigtable.v2.RowFilter.Chain.filters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1795, + serialized_end=1850, +) + +_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( + name='Interleave', + full_name='google.bigtable.v2.RowFilter.Interleave', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='filters', full_name='google.bigtable.v2.RowFilter.Interleave.filters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1852, + serialized_end=1912, +) + +_ROWFILTER_CONDITION = _descriptor.Descriptor( + name='Condition', + full_name='google.bigtable.v2.RowFilter.Condition', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.RowFilter.Condition.predicate_filter', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_filter', full_name='google.bigtable.v2.RowFilter.Condition.true_filter', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='false_filter', full_name='google.bigtable.v2.RowFilter.Condition.false_filter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1915, + serialized_end=2088, +) + +_ROWFILTER = _descriptor.Descriptor( + name='RowFilter', + full_name='google.bigtable.v2.RowFilter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chain', full_name='google.bigtable.v2.RowFilter.chain', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interleave', full_name='google.bigtable.v2.RowFilter.interleave', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='condition', full_name='google.bigtable.v2.RowFilter.condition', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sink', full_name='google.bigtable.v2.RowFilter.sink', index=3, + number=16, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pass_all_filter', full_name='google.bigtable.v2.RowFilter.pass_all_filter', index=4, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='block_all_filter', full_name='google.bigtable.v2.RowFilter.block_all_filter', index=5, + number=18, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key_regex_filter', full_name='google.bigtable.v2.RowFilter.row_key_regex_filter', index=6, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_sample_filter', full_name='google.bigtable.v2.RowFilter.row_sample_filter', index=7, + number=14, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name_regex_filter', full_name='google.bigtable.v2.RowFilter.family_name_regex_filter', index=8, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier_regex_filter', full_name='google.bigtable.v2.RowFilter.column_qualifier_regex_filter', index=9, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_range_filter', full_name='google.bigtable.v2.RowFilter.column_range_filter', index=10, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_range_filter', full_name='google.bigtable.v2.RowFilter.timestamp_range_filter', index=11, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_regex_filter', full_name='google.bigtable.v2.RowFilter.value_regex_filter', index=12, + number=9, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_range_filter', full_name='google.bigtable.v2.RowFilter.value_range_filter', index=13, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_row_offset_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_offset_filter', index=14, + number=10, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_row_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_limit_filter', index=15, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_column_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_column_limit_filter', index=16, + number=12, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strip_value_transformer', full_name='google.bigtable.v2.RowFilter.strip_value_transformer', index=17, + number=13, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='apply_label_transformer', full_name='google.bigtable.v2.RowFilter.apply_label_transformer', index=18, + number=19, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='filter', 
full_name='google.bigtable.v2.RowFilter.filter', + index=0, containing_type=None, fields=[]), + ], + serialized_start=979, + serialized_end=2098, +) + + +_MUTATION_SETCELL = _descriptor.Descriptor( + name='SetCell', + full_name='google.bigtable.v2.Mutation.SetCell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.SetCell.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.Mutation.SetCell.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.Mutation.SetCell.timestamp_micros', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.Mutation.SetCell.value', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2396, + serialized_end=2493, +) + +_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( + name='DeleteFromColumn', + full_name='google.bigtable.v2.Mutation.DeleteFromColumn', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='time_range', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.time_range', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2495, + serialized_end=2616, +) + +_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( + name='DeleteFromFamily', + full_name='google.bigtable.v2.Mutation.DeleteFromFamily', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromFamily.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2618, + serialized_end=2657, +) + +_MUTATION_DELETEFROMROW = _descriptor.Descriptor( + name='DeleteFromRow', + full_name='google.bigtable.v2.Mutation.DeleteFromRow', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2659, + serialized_end=2674, +) + +_MUTATION = _descriptor.Descriptor( + name='Mutation', + full_name='google.bigtable.v2.Mutation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='set_cell', full_name='google.bigtable.v2.Mutation.set_cell', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_column', full_name='google.bigtable.v2.Mutation.delete_from_column', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_family', full_name='google.bigtable.v2.Mutation.delete_from_family', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_row', full_name='google.bigtable.v2.Mutation.delete_from_row', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATION_SETCELL, _MUTATION_DELETEFROMCOLUMN, _MUTATION_DELETEFROMFAMILY, _MUTATION_DELETEFROMROW, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mutation', full_name='google.bigtable.v2.Mutation.mutation', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2101, + serialized_end=2686, +) + + +_READMODIFYWRITERULE = _descriptor.Descriptor( + name='ReadModifyWriteRule', + full_name='google.bigtable.v2.ReadModifyWriteRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadModifyWriteRule.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.ReadModifyWriteRule.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='append_value', full_name='google.bigtable.v2.ReadModifyWriteRule.append_value', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='increment_amount', full_name='google.bigtable.v2.ReadModifyWriteRule.increment_amount', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='rule', full_name='google.bigtable.v2.ReadModifyWriteRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2689, + serialized_end=2817, +) + +_ROW.fields_by_name['families'].message_type = _FAMILY +_FAMILY.fields_by_name['columns'].message_type = _COLUMN +_COLUMN.fields_by_name['cells'].message_type = _CELL +_ROWRANGE.oneofs_by_name['start_key'].fields.append( + _ROWRANGE.fields_by_name['start_key_closed']) +_ROWRANGE.fields_by_name['start_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] +_ROWRANGE.oneofs_by_name['start_key'].fields.append( + _ROWRANGE.fields_by_name['start_key_open']) +_ROWRANGE.fields_by_name['start_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] +_ROWRANGE.oneofs_by_name['end_key'].fields.append( + _ROWRANGE.fields_by_name['end_key_open']) +_ROWRANGE.fields_by_name['end_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] +_ROWRANGE.oneofs_by_name['end_key'].fields.append( + _ROWRANGE.fields_by_name['end_key_closed']) +_ROWRANGE.fields_by_name['end_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] +_ROWSET.fields_by_name['row_ranges'].message_type = _ROWRANGE +_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['start_qualifier_closed']) +_COLUMNRANGE.fields_by_name['start_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] +_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['start_qualifier_open']) +_COLUMNRANGE.fields_by_name['start_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] +_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['end_qualifier_closed']) +_COLUMNRANGE.fields_by_name['end_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] +_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['end_qualifier_open']) +_COLUMNRANGE.fields_by_name['end_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] +_VALUERANGE.oneofs_by_name['start_value'].fields.append( + _VALUERANGE.fields_by_name['start_value_closed']) +_VALUERANGE.fields_by_name['start_value_closed'].containing_oneof = 
_VALUERANGE.oneofs_by_name['start_value'] +_VALUERANGE.oneofs_by_name['start_value'].fields.append( + _VALUERANGE.fields_by_name['start_value_open']) +_VALUERANGE.fields_by_name['start_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] +_VALUERANGE.oneofs_by_name['end_value'].fields.append( + _VALUERANGE.fields_by_name['end_value_closed']) +_VALUERANGE.fields_by_name['end_value_closed'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] +_VALUERANGE.oneofs_by_name['end_value'].fields.append( + _VALUERANGE.fields_by_name['end_value_open']) +_VALUERANGE.fields_by_name['end_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] +_ROWFILTER_CHAIN.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_CHAIN.containing_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['predicate_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['true_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['false_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.containing_type = _ROWFILTER +_ROWFILTER.fields_by_name['chain'].message_type = _ROWFILTER_CHAIN +_ROWFILTER.fields_by_name['interleave'].message_type = _ROWFILTER_INTERLEAVE +_ROWFILTER.fields_by_name['condition'].message_type = _ROWFILTER_CONDITION +_ROWFILTER.fields_by_name['column_range_filter'].message_type = _COLUMNRANGE +_ROWFILTER.fields_by_name['timestamp_range_filter'].message_type = _TIMESTAMPRANGE +_ROWFILTER.fields_by_name['value_range_filter'].message_type = _VALUERANGE +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['chain']) +_ROWFILTER.fields_by_name['chain'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['interleave']) +_ROWFILTER.fields_by_name['interleave'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['condition']) +_ROWFILTER.fields_by_name['condition'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['sink']) +_ROWFILTER.fields_by_name['sink'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['pass_all_filter']) +_ROWFILTER.fields_by_name['pass_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['block_all_filter']) +_ROWFILTER.fields_by_name['block_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['row_key_regex_filter']) +_ROWFILTER.fields_by_name['row_key_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['row_sample_filter']) +_ROWFILTER.fields_by_name['row_sample_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['family_name_regex_filter']) +_ROWFILTER.fields_by_name['family_name_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + 
_ROWFILTER.fields_by_name['column_qualifier_regex_filter']) +_ROWFILTER.fields_by_name['column_qualifier_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['column_range_filter']) +_ROWFILTER.fields_by_name['column_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['timestamp_range_filter']) +_ROWFILTER.fields_by_name['timestamp_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['value_regex_filter']) +_ROWFILTER.fields_by_name['value_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['value_range_filter']) +_ROWFILTER.fields_by_name['value_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_row_offset_filter']) +_ROWFILTER.fields_by_name['cells_per_row_offset_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_row_limit_filter']) +_ROWFILTER.fields_by_name['cells_per_row_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_column_limit_filter']) +_ROWFILTER.fields_by_name['cells_per_column_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['strip_value_transformer']) +_ROWFILTER.fields_by_name['strip_value_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['apply_label_transformer']) +_ROWFILTER.fields_by_name['apply_label_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_MUTATION_SETCELL.containing_type = _MUTATION +_MUTATION_DELETEFROMCOLUMN.fields_by_name['time_range'].message_type = _TIMESTAMPRANGE +_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION +_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION +_MUTATION_DELETEFROMROW.containing_type = _MUTATION +_MUTATION.fields_by_name['set_cell'].message_type = _MUTATION_SETCELL +_MUTATION.fields_by_name['delete_from_column'].message_type = _MUTATION_DELETEFROMCOLUMN +_MUTATION.fields_by_name['delete_from_family'].message_type = _MUTATION_DELETEFROMFAMILY +_MUTATION.fields_by_name['delete_from_row'].message_type = _MUTATION_DELETEFROMROW +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['set_cell']) +_MUTATION.fields_by_name['set_cell'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_column']) +_MUTATION.fields_by_name['delete_from_column'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_family']) +_MUTATION.fields_by_name['delete_from_family'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_row']) +_MUTATION.fields_by_name['delete_from_row'].containing_oneof = 
_MUTATION.oneofs_by_name['mutation'] +_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( + _READMODIFYWRITERULE.fields_by_name['append_value']) +_READMODIFYWRITERULE.fields_by_name['append_value'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] +_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( + _READMODIFYWRITERULE.fields_by_name['increment_amount']) +_READMODIFYWRITERULE.fields_by_name['increment_amount'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] +DESCRIPTOR.message_types_by_name['Row'] = _ROW +DESCRIPTOR.message_types_by_name['Family'] = _FAMILY +DESCRIPTOR.message_types_by_name['Column'] = _COLUMN +DESCRIPTOR.message_types_by_name['Cell'] = _CELL +DESCRIPTOR.message_types_by_name['RowRange'] = _ROWRANGE +DESCRIPTOR.message_types_by_name['RowSet'] = _ROWSET +DESCRIPTOR.message_types_by_name['ColumnRange'] = _COLUMNRANGE +DESCRIPTOR.message_types_by_name['TimestampRange'] = _TIMESTAMPRANGE +DESCRIPTOR.message_types_by_name['ValueRange'] = _VALUERANGE +DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER +DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION +DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE + +Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( + DESCRIPTOR = _ROW, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) + )) +_sym_db.RegisterMessage(Row) + +Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( + DESCRIPTOR = _FAMILY, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) + )) +_sym_db.RegisterMessage(Family) + +Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( + DESCRIPTOR = _COLUMN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) + )) +_sym_db.RegisterMessage(Column) + +Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( + DESCRIPTOR = _CELL, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) + )) +_sym_db.RegisterMessage(Cell) + +RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( + DESCRIPTOR = _ROWRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) + )) +_sym_db.RegisterMessage(RowRange) + +RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( + DESCRIPTOR = _ROWSET, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) + )) +_sym_db.RegisterMessage(RowSet) + +ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( + DESCRIPTOR = _COLUMNRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) + )) +_sym_db.RegisterMessage(ColumnRange) + +TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( + DESCRIPTOR = _TIMESTAMPRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) + )) +_sym_db.RegisterMessage(TimestampRange) + +ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( + DESCRIPTOR = _VALUERANGE, + __module__ = 
'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) + )) +_sym_db.RegisterMessage(ValueRange) + +RowFilter = _reflection.GeneratedProtocolMessageType('RowFilter', (_message.Message,), dict( + + Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_CHAIN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) + )) + , + + Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_INTERLEAVE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) + )) + , + + Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_CONDITION, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) + )) + , + DESCRIPTOR = _ROWFILTER, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) + )) +_sym_db.RegisterMessage(RowFilter) +_sym_db.RegisterMessage(RowFilter.Chain) +_sym_db.RegisterMessage(RowFilter.Interleave) +_sym_db.RegisterMessage(RowFilter.Condition) + +Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( + + SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_SETCELL, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) + )) + , + + DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) + )) + , + + DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) + )) + , + + DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMROW, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) + )) + , + DESCRIPTOR = _MUTATION, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) + )) +_sym_db.RegisterMessage(Mutation) +_sym_db.RegisterMessage(Mutation.SetCell) +_sym_db.RegisterMessage(Mutation.DeleteFromColumn) +_sym_db.RegisterMessage(Mutation.DeleteFromFamily) +_sym_db.RegisterMessage(Mutation.DeleteFromRow) + +ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITERULE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) + )) +_sym_db.RegisterMessage(ReadModifyWriteRule) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/instance_pb2.py 
b/gcloud/bigtable/_generated_v2/instance_pb2.py new file mode 100644 index 000000000000..2161bf33bf58 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/instance_pb2.py @@ -0,0 +1,222 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/instance.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/instance.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n\'google/bigtable/admin/v2/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\"\x9e\x01\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\x42/\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_INSTANCE_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Instance.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=244, + serialized_end=297, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) + +_CLUSTER_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Cluster.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RESIZING', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='DISABLED', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=489, + serialized_end=570, +) +_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) + + +_INSTANCE = _descriptor.Descriptor( + name='Instance', + full_name='google.bigtable.admin.v2.Instance', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='display_name', full_name='google.bigtable.admin.v2.Instance.display_name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _INSTANCE_STATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=139, + serialized_end=297, +) + + +_CLUSTER = _descriptor.Descriptor( + name='Cluster', + full_name='google.bigtable.admin.v2.Cluster', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CLUSTER_STATE, + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=300, + serialized_end=570, +) + +_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE +_INSTANCE_STATE.containing_type = _INSTANCE +_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE +_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2._STORAGETYPE +_CLUSTER_STATE.containing_type = _CLUSTER +DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE +DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER + +Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( + DESCRIPTOR = _INSTANCE, + __module__ = 'google.bigtable.admin.v2.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) + )) +_sym_db.RegisterMessage(Instance) + +Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict( + DESCRIPTOR = _CLUSTER, + __module__ = 'google.bigtable.admin.v2.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) + )) +_sym_db.RegisterMessage(Cluster) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/gcloud/bigtable/_generated_v2/table_pb2.py b/gcloud/bigtable/_generated_v2/table_pb2.py new file mode 100644 index 000000000000..e39091d88351 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/table_pb2.py @@ -0,0 +1,529 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/admin/v2/table.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/table.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1egoogle/protobuf/duration.proto\"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState\"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( + name='ReplicationState', + full_name='google.bigtable.admin.v2.Table.ClusterState.ReplicationState', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INITIALIZING', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PLANNED_MAINTENANCE', index=2, number=2, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='UNPLANNED_MAINTENANCE', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=458, + serialized_end=578, +) +_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) + +_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( + name='TimestampGranularity', + full_name='google.bigtable.admin.v2.Table.TimestampGranularity', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MILLIS', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=775, + serialized_end=848, +) +_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) + +_TABLE_VIEW = _descriptor.EnumDescriptor( + name='View', + full_name='google.bigtable.admin.v2.Table.View', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='VIEW_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NAME_ONLY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCHEMA_VIEW', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REPLICATION_VIEW', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FULL', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=850, + serialized_end=942, +) +_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) + + +_TABLE_CLUSTERSTATE = _descriptor.Descriptor( + name='ClusterState', + full_name='google.bigtable.admin.v2.Table.ClusterState', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='replication_state', full_name='google.bigtable.admin.v2.Table.ClusterState.replication_state', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _TABLE_CLUSTERSTATE_REPLICATIONSTATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=352, + serialized_end=578, +) + +_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( + name='ClusterStatesEntry', + full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=580, + serialized_end=678, +) + +_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( + name='ColumnFamiliesEntry', + full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=680, + serialized_end=773, +) + +_TABLE = _descriptor.Descriptor( + name='Table', + full_name='google.bigtable.admin.v2.Table', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Table.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster_states', full_name='google.bigtable.admin.v2.Table.cluster_states', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TABLE_CLUSTERSTATE, _TABLE_CLUSTERSTATESENTRY, _TABLE_COLUMNFAMILIESENTRY, ], + enum_types=[ + _TABLE_TIMESTAMPGRANULARITY, + _TABLE_VIEW, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=99, + serialized_end=942, +) + + +_COLUMNFAMILY = _descriptor.Descriptor( + name='ColumnFamily', + full_name='google.bigtable.admin.v2.ColumnFamily', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0, + 
number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=944, + serialized_end=1009, +) + + +_GCRULE_INTERSECTION = _descriptor.Descriptor( + name='Intersection', + full_name='google.bigtable.admin.v2.GcRule.Intersection', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1224, + serialized_end=1287, +) + +_GCRULE_UNION = _descriptor.Descriptor( + name='Union', + full_name='google.bigtable.admin.v2.GcRule.Union', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1289, + serialized_end=1345, +) + +_GCRULE = _descriptor.Descriptor( + name='GcRule', + full_name='google.bigtable.admin.v2.GcRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + 
_descriptor.OneofDescriptor( + name='rule', full_name='google.bigtable.admin.v2.GcRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1012, + serialized_end=1353, +) + +_TABLE_CLUSTERSTATE.fields_by_name['replication_state'].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE +_TABLE_CLUSTERSTATE.containing_type = _TABLE +_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.fields_by_name['value'].message_type = _TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE +_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY +_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE +_TABLE.fields_by_name['cluster_states'].message_type = _TABLE_CLUSTERSTATESENTRY +_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY +_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE +_TABLE_VIEW.containing_type = _TABLE +_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE +_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_INTERSECTION.containing_type = _GCRULE +_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_UNION.containing_type = _GCRULE +_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION +_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_num_versions']) +_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_age']) +_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['intersection']) +_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['union']) +_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +DESCRIPTOR.message_types_by_name['Table'] = _TABLE +DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY +DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE + +Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( + + ClusterState = _reflection.GeneratedProtocolMessageType('ClusterState', (_message.Message,), dict( + DESCRIPTOR = _TABLE_CLUSTERSTATE, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) + )) + , + + ClusterStatesEntry = _reflection.GeneratedProtocolMessageType('ClusterStatesEntry', (_message.Message,), dict( + DESCRIPTOR = _TABLE_CLUSTERSTATESENTRY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) + )) + , + + ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( + DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) + )) + , + DESCRIPTOR = _TABLE, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # 
@@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) + )) +_sym_db.RegisterMessage(Table) +_sym_db.RegisterMessage(Table.ClusterState) +_sym_db.RegisterMessage(Table.ClusterStatesEntry) +_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) + +ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( + DESCRIPTOR = _COLUMNFAMILY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) + )) +_sym_db.RegisterMessage(ColumnFamily) + +GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( + + Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_INTERSECTION, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) + )) + , + + Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_UNION, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) + )) + , + DESCRIPTOR = _GCRULE, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) + )) +_sym_db.RegisterMessage(GcRule) +_sym_db.RegisterMessage(GcRule.Intersection) +_sym_db.RegisterMessage(GcRule.Union) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001')) +_TABLE_CLUSTERSTATESENTRY.has_options = True +_TABLE_CLUSTERSTATESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TABLE_COLUMNFAMILIESENTRY.has_options = True +_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/scripts/make_datastore_grpc.py b/scripts/make_datastore_grpc.py index 30f0f4e47adb..5c460511a889 100644 --- a/scripts/make_datastore_grpc.py +++ b/scripts/make_datastore_grpc.py @@ -31,6 +31,7 @@ PROTOC_CMD = os.environ.get('PROTOC_CMD', 'protoc') GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin') + def get_pb2_contents_with_grpc(): """Get pb2 lines generated by protoc with gRPC plugin. diff --git a/scripts/make_operations_grpc.py b/scripts/make_operations_grpc.py index 109751680788..0b6a1e8ebc38 100644 --- a/scripts/make_operations_grpc.py +++ b/scripts/make_operations_grpc.py @@ -27,8 +27,9 @@ 'bigtable-protos', 'src', 'main', 'proto') PROTO_PATH = os.path.join(PROTOS_DIR, 'google', 'longrunning', 'operations.proto') +GENERATED_SUBDIR = os.environ.get('GENERATED_SUBDIR', '_generated') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'bigtable', - '_generated', 'operations_grpc_pb2.py') + GENERATED_SUBDIR, 'operations_grpc_pb2.py') PROTOC_CMD = os.environ.get('PROTOC_CMD', 'protoc') GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin') diff --git a/scripts/rewrite_imports.py b/scripts/rewrite_imports.py index 7429ec14734c..5717a50fd8e3 100644 --- a/scripts/rewrite_imports.py +++ b/scripts/rewrite_imports.py @@ -18,15 +18,20 @@ and the dependent modules (google/api and google/protobuf). 
""" -import glob +import sys IMPORT_TEMPLATE = 'import %s' IMPORT_FROM_TEMPLATE = 'from %s import ' REPLACEMENTS = { + # Bigtable v1 'google.bigtable.admin.cluster.v1': 'gcloud.bigtable._generated', 'google.bigtable.admin.table.v1': 'gcloud.bigtable._generated', 'google.bigtable.v1': 'gcloud.bigtable._generated', + # Bigtble v2 + 'google.bigtable.v2': 'gcloud.bigtable._generated_v2', + 'google.bigtable.admin.v2': 'gcloud.bigtable._generated_v2', + # Datastore v1beta3 'google.datastore.v1beta3': 'gcloud.datastore._generated', } @@ -135,9 +140,7 @@ def rewrite_file(filename): def main(): """Rewrites all PB2 files.""" - pb2_files = (glob.glob('gcloud/bigtable/_generated/*pb2.py') + - glob.glob('gcloud/datastore/_generated/*pb2.py')) - for filename in pb2_files: + for filename in sys.argv[1:]: rewrite_file(filename) diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index d24902136e22..7fa7662efb07 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -32,6 +32,7 @@ IGNORED_DIRECTORIES = [ os.path.join('gcloud', 'bigtable', '_generated'), + os.path.join('gcloud', 'bigtable', '_generated_v2'), os.path.join('gcloud', 'datastore', '_generated'), ] IGNORED_FILES = [ diff --git a/tox.ini b/tox.ini index 4d01e63b5d72..c960032c62b4 100644 --- a/tox.ini +++ b/tox.ini @@ -99,7 +99,7 @@ deps = {[testenv:docs]deps} passenv = {[testenv:docs]passenv} [pep8] -exclude = docs/conf.py,gcloud/bigtable/_generated/*,gcloud/datastore/_generated/* +exclude = docs/conf.py,gcloud/bigtable/_generated*/*,gcloud/datastore/_generated/* verbose = 1 [testenv:lint] From 9d7d8af4d7d2582a510176aaf7037eb0b9c5a944 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 24 Jun 2016 17:29:53 -0400 Subject: [PATCH 013/103] Exclude new '_generated_v2' files from coverage. --- .coveragerc | 1 + 1 file changed, 1 insertion(+) diff --git a/.coveragerc b/.coveragerc index 280c5674f5bd..ae05ce469e67 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,7 @@ [report] omit = */_generated/*.py + */_generated_v2/*.py show_missing = True exclude_lines = # Re-enable the standard pragma From 4bb2783e1a5052323dfde7fccf941c885240a4b2 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 24 Jun 2016 20:32:47 -0400 Subject: [PATCH 014/103] Add docs, exporting logging to storage permissions. --- docs/logging-usage.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/logging-usage.rst b/docs/logging-usage.rst index 9b23e2edd8ab..f59fe9b7948c 100644 --- a/docs/logging-usage.rst +++ b/docs/logging-usage.rst @@ -212,6 +212,22 @@ Export log entries using sinks Sinks allow exporting entries which match a given filter to Cloud Storage buckets, BigQuery datasets, or Cloud Pub/Sub topics. +Make sure that the storage bucket you want to export logs too has +`cloud-logs@google.com` as the owner. See `Set permission for writing exported logs`_. + +Add `cloud-logs@google.com` as the owner of `my-bucket-name`: + +.. doctest:: + + >>> from gcloud import storage + >>> client = storage.Client() + >>> bucket = client.get_bucket('my-bucket-name') + >>> acl = bucket.acl + >>> acl.user('cloud-logs@google.com').grant_owner() + >>> acl.save() + +.. _Set permission for writing exported logs: https://cloud.google.com/logging/docs/export/configure_export#setting_product_name_short_permissions_for_writing_exported_logs + Create a Cloud Storage sink: .. doctest:: From 9a2d31339b1dd36869d00e5063183c6880952a94 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sat, 25 Jun 2016 23:09:34 -0400 Subject: [PATCH 015/103] Drop 'Table.rename'. 
It was never actually implemented on the back-end in V1, and has been dropped altogether in V2. --- gcloud/bigtable/table.py | 30 ----------------------- gcloud/bigtable/test_table.py | 45 ----------------------------------- 2 files changed, 75 deletions(-) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 5815086d7c00..c619b7145d71 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -176,36 +176,6 @@ def create(self, initial_split_keys=None): # We expect a `._generated.bigtable_table_data_pb2.Table` client._table_stub.CreateTable(request_pb, client.timeout_seconds) - def rename(self, new_table_id): - """Rename this table. - - .. note:: - - This cannot be used to move tables between clusters, - zones, or projects. - - .. note:: - - The Bigtable Table Admin API currently (``v1``) returns - - ``BigtableTableService.RenameTable is not yet implemented`` - - when this method is used. It's unclear when this method will - actually be supported by the API. - - :type new_table_id: str - :param new_table_id: The new name table ID. - """ - request_pb = messages_pb2.RenameTableRequest( - name=self.name, - new_id=new_table_id, - ) - client = self._cluster._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.RenameTable(request_pb, client.timeout_seconds) - - self.table_id = new_table_id - def delete(self): """Delete this table.""" request_pb = messages_pb2.DeleteTableRequest(name=self.name) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 9fcdf21593b0..09d5baba225d 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -176,51 +176,6 @@ def test_create_with_split_keys(self): initial_split_keys = ['s1', 's2'] self._create_test_helper(initial_split_keys) - def test_rename(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - new_table_id = 'new_table_id' - timeout_seconds = 97 - self.assertNotEqual(new_table_id, table_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.RenameTableRequest( - name=table_name, - new_id=new_table_id, - ) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # rename() has no return value. - - # Perform the method and check the result. - result = table.rename(new_table_id) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'RenameTable', - (request_pb, timeout_seconds), - {}, - )]) - def _list_column_families_helper(self, column_family_name=None): from gcloud.bigtable._generated import ( bigtable_table_data_pb2 as data_pb2) From f5b1719090be84280d53ff6eba89687da4c79500 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sun, 26 Jun 2016 15:52:48 -0400 Subject: [PATCH 016/103] Drop overlooked docs / system test usage of 'Table.rename'. 
Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1908#issuecomment-228618222 --- docs/bigtable-table-api.rst | 11 ----------- system_tests/bigtable.py | 19 ------------------- 2 files changed, 30 deletions(-) diff --git a/docs/bigtable-table-api.rst b/docs/bigtable-table-api.rst index 6ef4dba1e7e0..78ac3c6f079a 100644 --- a/docs/bigtable-table-api.rst +++ b/docs/bigtable-table-api.rst @@ -65,17 +65,6 @@ Make a `DeleteTable`_ API request with table.delete() -Rename an existing Table ------------------------- - -Though the `RenameTable`_ API request is listed in the service -definition, requests to that method return:: - - BigtableTableService.RenameTable is not yet implemented - -We have implemented :meth:`rename() ` -but it will not work unless the backend supports the method. - List Column Families in a Table ------------------------------- diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index 4572766bccc3..3259adea6c15 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -232,25 +232,6 @@ def test_create_table(self): sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) - def test_rename_table(self): - from grpc.beta import interfaces - from grpc.framework.interfaces.face import face - - temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - with self.assertRaises(face.LocalError) as exc_manager: - temp_table.rename(temp_table_id + '-alt') - exc_caught = exc_manager.exception - self.assertNotEqual(exc_caught, None) - self.assertEqual(exc_caught.code, - interfaces.StatusCode.UNIMPLEMENTED) - self.assertEqual( - exc_caught.details, - 'BigtableTableService.RenameTable is not yet implemented') - def test_create_column_family(self): temp_table_id = 'foo-bar-baz-table' temp_table = Config.CLUSTER.table(temp_table_id) From 16b04c935099b5bf45692223b18f03633df39a56 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sat, 25 Jun 2016 10:56:22 -0400 Subject: [PATCH 017/103] Parse JSON acceptance tests. 
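The fixture added below groups its cases under a top-level "tests" key, each case carrying a "name", a list of text-format CellChunk strings in "chunks", and the expected "results" (null when no rows should be produced). A minimal sketch of walking that structure, assuming the file path used in this patch; the summary printout is illustrative only:

    # Walk the acceptance-test fixture: each case has a name, a list of
    # text-format CellChunk strings, and the expected results (or null).
    import json

    with open('gcloud/bigtable/read-rows-acceptance-test.json') as json_file:
        test_json = json.load(json_file)

    for test in test_json['tests']:
        results = test['results'] or []
        errors = sum(1 for result in results if result['error'])
        print('%-45s chunks=%d results=%d errors=%d' % (
            test['name'], len(test['chunks']), len(results), errors))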
--- .../bigtable/read-rows-acceptance-test.json | 1178 +++++++++++++++++ gcloud/bigtable/test_row_data.py | 31 + 2 files changed, 1209 insertions(+) create mode 100644 gcloud/bigtable/read-rows-acceptance-test.json diff --git a/gcloud/bigtable/read-rows-acceptance-test.json b/gcloud/bigtable/read-rows-acceptance-test.json new file mode 100644 index 000000000000..4973831f4979 --- /dev/null +++ b/gcloud/bigtable/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 
101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 
10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: 
\"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: 
\u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: 
\"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: 
false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": 
"value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", 
+ "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 56b1c15f0655..4dd0a8cd1723 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -522,3 +522,34 @@ def cancel(self): def next(self): return next(self.iter_values) + + +def _generate_cell_chunks(chunk_text_pbs): + from google.protobuf.text_format import Merge + from gcloud.bigtable._generated_v2.bigtable_pb2 import ReadRowsResponse + + chunks = [] + + for chunk_text_pb in chunk_text_pbs: + chunk = ReadRowsResponse.CellChunk() + chunks.append(Merge(chunk_text_pb, chunk)) + + return chunks + + +def _parse_readrows_acceptance_tests(filename): + """Parse acceptance tests from JSON + + See: + https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/master/bigtable-client-core/src/test/resources/com/google/cloud/bigtable/grpc/scanner/v2/read-rows-acceptance-test.json + """ + import json + + with open(filename) as json_file: + test_json = json.load(json_file) + + for test in test_json['tests']: + name = test['name'] + chunks = _generate_cell_chunks(test['chunks']) + results = test['results'] + yield name, chunks, results From ebf27b58ce063e5ad3b3c029e99a1799d3bf13d6 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sat, 25 Jun 2016 20:40:23 -0400 Subject: [PATCH 018/103] Add 'ReadRowsResponseV2' wrapper. Processes the 'ReadRowsResponse.CellChunk' state machine. Add tests based on JSON acceptance tests: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/master/bigtable-client-core/src/test/resources/com/google/cloud/bigtable/grpc/scanner/v2/read-rows-acceptance-test.json. 
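A rough sketch of how the new wrapper is meant to be driven, assuming the caller already holds ``CellChunk`` protobufs: the stub response and stub stream below are illustrative stand-ins, while ``PartialRowsDataV2``, ``consume_next()``, and ``rows`` come from the diff that follows.

    # Minimal sketch: drive the CellChunk state machine with one fake response.
    from gcloud.bigtable.row_data import PartialRowsDataV2


    class _StubResponse(object):
        """Stands in for a ReadRowsResponse carrying pre-built CellChunk pbs."""

        def __init__(self, chunks, last_scanned_row_key=''):
            self.chunks = chunks
            self.last_scanned_row_key = last_scanned_row_key


    class _StubStream(object):
        """Mimics the gRPC response iterator the wrapper expects."""

        def __init__(self, *responses):
            self._responses = iter(responses)

        def next(self):
            return next(self._responses)


    def rows_from_chunks(chunks):
        """Return the committed rows parsed from a list of CellChunk pbs."""
        data = PartialRowsDataV2(_StubStream(_StubResponse(chunks)))
        data.consume_next()   # parse one response worth of chunks
        return data.rows      # dict mapping row_key -> PartialRowData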
--- gcloud/bigtable/row_data.py | 252 ++++++++++++++++++++ gcloud/bigtable/test_row_data.py | 392 +++++++++++++++++++++++++++++++ 2 files changed, 644 insertions(+) diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index e64a242f8507..5191a1759b16 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -321,3 +321,255 @@ def consume_all(self, max_loops=None): self.consume_next() except StopIteration: break + + +class ReadRowsResponseError(RuntimeError): + """Exception raised to to invalid chunk / response data from back-end.""" + + +def _raise_if(predicate): + """Helper for validation methods.""" + if predicate: + raise ReadRowsResponseError() + + +class PartialCellV2(object): + """Data for a not-yet-complete cell.""" + + def __init__(self, row_key, family_name, qualifier, timestamp_micros, + labels=(), value=b''): + self.row_key = row_key + self.family_name = family_name + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels + self.value = value + + def append_value(self, value): + """Append bytes from a new chunk to value. + + :type value: bytes + :param value: bytes to append + """ + self.value += value + + +class PartialRowsDataV2(object): + """Handle state involved in consuming a ``ReadRows`` streaming response. + + :type response_iterator: + :class:`grpc.framework.alpha._reexport._CancellableIterator` returning + :class:`gcloud.bigtable._generated_v2.bigtable_pb2.ReadRowsResponse` + :param response_iterator: + A streaming iterator returned from a ``ReadRows`` request. + """ + # State names + START = "Start" + NEW_ROW = "New row" + ROW_IN_PROGRESS = "Row in progress" + CELL_IN_PROGRESS = "Cell in progress" + + def __init__(self, response_iterator): + self._response_iterator = response_iterator + # Fully-processed rows, keyed by `row_key` + self._rows = {} + # Counter for responses pulled from iterator + self._counter = 0 + # Maybe cached from previous response + self._last_scanned_row_key = None + # In-progress row, unset until first response, after commit/reset + self._row = None + # Last complete row, unset until first commit + self._previous_row = None + # In-progress cell, unset until first response, after completion + self._cell = None + # Last complete cell, unset until first completion, after new row + self._previous_cell = None + + @property + def state(self): + """Name of state machine state.""" + if self._last_scanned_row_key is None: + return self.START + if self._row is None: + assert self._cell is None + assert self._previous_cell is None + return self.NEW_ROW + if self._cell is not None: + return self.CELL_IN_PROGRESS + if self._previous_cell is not None: + return self.ROW_IN_PROGRESS + return self.NEW_ROW # row added, no chunk yet processed + + @property + def rows(self): + """Property returning all rows accumulated from the stream. + + :rtype: dict + :returns: Dictionary of :class:`PartialRowData`. + """ + _raise_if(self.state not in (self.NEW_ROW,)) + # NOTE: To avoid duplicating large objects, this is just the + # mutable private data. 
+ return self._rows + + @staticmethod + def _validate_chunk_status(chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`, etc.""" + # No reseet with other keys + if chunk.reset_row: + _raise_if(chunk.row_key) + _raise_if(chunk.HasField('family_name')) + _raise_if(chunk.HasField('qualifier')) + _raise_if(chunk.timestamp_micros) + _raise_if(chunk.labels) + _raise_if(chunk.value_size) + _raise_if(chunk.value) + # No commit with value size + _raise_if(chunk.commit_row and chunk.value_size > 0) + # No negative value_size (inferred as a general constraint). + _raise_if(chunk.value_size < 0) + + def _validate_chunk_new_row(self, chunk): + """Helper for :meth:`_validate_chunk`.""" + assert self.state == self.NEW_ROW + _raise_if(chunk.reset_row) + _raise_if(not chunk.row_key) + _raise_if(not chunk.family_name) + _raise_if(not chunk.qualifier) + # This constraint is not enforced in the Go example. + _raise_if(chunk.value_size > 0 and chunk.commit_row is not False) + # This constraint is from the Go example, not the spec. + _raise_if(self._previous_row is not None and + chunk.row_key <= self._previous_row.row_key) + + def _same_as_previous(self, chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`""" + previous = self._previous_cell + return (chunk.row_key == previous.row_key and + chunk.family_name == previous.family_name and + chunk.qualifier == previous.qualifier and + chunk.labels == previous.labels) + + def _validate_chunk_row_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.ROW_IN_PROGRESS + self._validate_chunk_status(chunk) + if not chunk.HasField('commit_row') and not chunk.reset_row: + _raise_if(not chunk.timestamp_micros or not chunk.value) + _raise_if(chunk.row_key and + chunk.row_key != self._row.row_key) + _raise_if(chunk.HasField('family_name') and + not chunk.HasField('qualifier')) + previous = self._previous_cell + _raise_if(self._same_as_previous(chunk) and + chunk.timestamp_micros <= previous.timestamp_micros) + + def _validate_chunk_cell_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.CELL_IN_PROGRESS + self._validate_chunk_status(chunk) + self._copy_from_current(chunk) + + def _validate_chunk(self, chunk): + """Helper for :meth:`consume_next`.""" + if self.state == self.NEW_ROW: + self._validate_chunk_new_row(chunk) + if self.state == self.ROW_IN_PROGRESS: + self._validate_chunk_row_in_progress(chunk) + if self.state == self.CELL_IN_PROGRESS: + self._validate_chunk_cell_in_progress(chunk) + + def _save_current_cell(self): + """Helper for :meth:`consume_next`.""" + row, cell = self._row, self._cell + family = row._cells.setdefault(cell.family_name, {}) + qualified = family.setdefault(cell.qualifier, []) + complete = Cell.from_pb(self._cell) + qualified.append(complete) + self._cell, self._previous_cell = None, cell + + def _copy_from_current(self, chunk): + """Helper for :meth:`consume_next`.""" + current = self._cell + if current is not None: + if not chunk.row_key: + chunk.row_key = current.row_key + if not chunk.HasField('family_name'): + chunk.family_name.value = current.family_name + if not chunk.HasField('qualifier'): + chunk.qualifier.value = current.qualifier + if not chunk.timestamp_micros: + chunk.timestamp_micros = current.timestamp_micros + if not chunk.labels: + chunk.labels.extend(current.labels) + + def _copy_from_previous(self, cell): + """Helper for :meth:`consume_next`.""" + previous = self._previous_cell + if previous is not None: + if not 
cell.row_key: + cell.row_key = previous.row_key + if not cell.family_name: + cell.family_name = previous.family_name + if not cell.qualifier: + cell.qualifier = previous.qualifier + + def _save_current_row(self): + """Helper for :meth:`consume_next`.""" + if self._cell: + self._save_current_cell() + self._rows[self._row.row_key] = self._row + self._row, self._previous_row = None, self._row + self._previous_cell = None + + def consume_next(self): + """Consume the next ``ReadRowsResponse`` from the stream. + + Parse the response and its chunks into a new/existing row in + :attr:`_rows` + """ + response = self._response_iterator.next() + self._counter += 1 + + if self._last_scanned_row_key is None: # first response + if response.last_scanned_row_key: + raise ReadRowsResponseError() + + self._last_scanned_row_key = response.last_scanned_row_key + + row = self._row + cell = self._cell + + for chunk in response.chunks: + + self._validate_chunk(chunk) + + if chunk.reset_row: + row = self._row = None + cell = self._cell = self._previous_cell = None + continue + + if row is None: + row = self._row = PartialRowData(chunk.row_key) + + if cell is None: + cell = self._cell = PartialCellV2( + chunk.row_key, + chunk.family_name.value, + chunk.qualifier.value, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(cell) + else: + cell.append_value(chunk.value) + + if chunk.commit_row: + self._save_current_row() + row = cell = None + continue + + if chunk.value_size == 0: + self._save_current_cell() + cell = None diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 4dd0a8cd1723..ef7690505d90 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -510,6 +510,373 @@ def test_consume_all_with_max_loops(self): self.assertEqual(list(response_iterator.iter_values), [value2, value3]) +class TestPartialRowsDataV2(unittest2.TestCase): + + _json_tests = None + + def _getTargetClass(self): + from gcloud.bigtable.row_data import PartialRowsDataV2 + return PartialRowsDataV2 + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _load_json_test(self, test_name): + import os + if self.__class__._json_tests is None: + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, 'read-rows-acceptance-test.json') + raw = _parse_readrows_acceptance_tests(filename) + tests = self.__class__._json_tests = {} + for (name, chunks, results) in raw: + tests[name] = chunks, results + return self.__class__._json_tests[test_name] + + # Not part of the JSON acceptance tests. 
+ + def test_state_start(self): + prd = self._makeOne([]) + self.assertEqual(prd.state, prd.START) + + def test_state_new_row_w_row(self): + prd = self._makeOne([]) + prd._last_scanned_row_key = '' + prd._row = object() + self.assertEqual(prd.state, prd.NEW_ROW) + + def test__copy_from_current_unset(self): + prd = self._makeOne([]) + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + prd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, b'') + self.assertEqual(chunk.family_name.value, u'') + self.assertEqual(chunk.qualifier.value, b'') + self.assertEqual(chunk.timestamp_micros, 0) + self.assertEqual(chunk.labels, []) + + def test__copy_from_current_blank(self): + ROW_KEY = b'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + prd._cell = _PartialCellV2() + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + chunk.row_key = ROW_KEY + chunk.family_name.value = FAMILY_NAME + chunk.qualifier.value = QUALIFIER + chunk.timestamp_micros = TIMESTAMP_MICROS + chunk.labels.extend(LABELS) + prd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, ROW_KEY) + self.assertEqual(chunk.family_name.value, FAMILY_NAME) + self.assertEqual(chunk.qualifier.value, QUALIFIER) + self.assertEqual(chunk.timestamp_micros, TIMESTAMP_MICROS) + self.assertEqual(chunk.labels, LABELS) + + def test__copy_from_previous_unset(self): + prd = self._makeOne([]) + cell = _PartialCellV2() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, '') + self.assertEqual(cell.family_name, u'') + self.assertEqual(cell.qualifier, b'') + self.assertEqual(cell.timestamp_micros, 0) + self.assertEqual(cell.labels, []) + + def test__copy_from_previous_blank(self): + ROW_KEY = 'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + cell = _PartialCellV2( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + prd._previous_cell = _PartialCellV2() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, ROW_KEY) + self.assertEqual(cell.family_name, FAMILY_NAME) + self.assertEqual(cell.qualifier, QUALIFIER) + self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS) + self.assertEqual(cell.labels, LABELS) + + def test__copy_from_previous_filled(self): + ROW_KEY = 'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + prd._previous_cell = _PartialCellV2( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + cell = _PartialCellV2() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, ROW_KEY) + self.assertEqual(cell.family_name, FAMILY_NAME) + self.assertEqual(cell.qualifier, QUALIFIER) + self.assertEqual(cell.timestamp_micros, 0) + self.assertEqual(cell.labels, []) + + def test__save_row_no_cell(self): + ROW_KEY = 'RK' + prd = self._makeOne([]) + row = prd._row = _Dummy(row_key=ROW_KEY) + prd._cell = None + prd._save_current_row() + self.assertTrue(prd._rows[ROW_KEY] is row) + + def test_invalid_last_scanned_row_key_on_start(self): + from gcloud.bigtable.row_data import ReadRowsResponseError + response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(ReadRowsResponseError): + prd.consume_next() + + def 
test_valid_last_scanned_row_key_on_start(self): + response = _ReadRowsResponseV2( + chunks=(), last_scanned_row_key='AFTER') + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd._last_scanned_row_key = 'BEFORE' + prd.consume_next() + self.assertEqual(prd._last_scanned_row_key, 'AFTER') + + def test_invalid_empty_chunk(self): + from gcloud.bigtable.row_data import ReadRowsResponseError + chunks = _generate_cell_chunks(['']) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(ReadRowsResponseError): + prd.consume_next() + + def test_invalid_empty_second_chunk(self): + from gcloud.bigtable.row_data import ReadRowsResponseError + chunks = _generate_cell_chunks(['', '']) + first = chunks[0] + first.row_key = b'RK' + first.family_name.value = 'A' + first.qualifier.value = b'C' + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(ReadRowsResponseError): + prd.consume_next() + + # JSON Error cases + + def _fail_during_consume(self, testcase_name): + from gcloud.bigtable.row_data import ReadRowsResponseError + chunks, _ = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(ReadRowsResponseError): + prd.consume_next() + + def _fail_during_rows(self, testcase_name): + from gcloud.bigtable.row_data import ReadRowsResponseError + chunks, _ = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + with self.assertRaises(ReadRowsResponseError): + _ = prd.rows + + def test_invalid_no_commit(self): + self._fail_during_rows('invalid - no commit') + + def test_invalid_no_cell_key_before_commit(self): + self._fail_during_consume('invalid - no cell key before commit') + + def test_invalid_no_cell_key_before_value(self): + self._fail_during_consume('invalid - no cell key before value') + + def test_invalid_new_col_family_wo_qualifier(self): + self._fail_during_consume( + 'invalid - new col family must specify qualifier') + + def test_invalid_no_commit_between_rows(self): + self._fail_during_consume('invalid - no commit between rows') + + def test_invalid_no_commit_after_first_row(self): + self._fail_during_consume('invalid - no commit after first row') + + def test_invalid_last_row_missing_commit(self): + self._fail_during_rows('invalid - last row missing commit') + + def test_invalid_duplicate_row_key(self): + self._fail_during_consume('invalid - duplicate row key') + + def test_invalid_new_row_missing_row_key(self): + self._fail_during_consume('invalid - new row missing row key') + + def test_invalid_bare_reset(self): + self._fail_during_consume('invalid - bare reset') + + def test_invalid_bad_reset_no_commit(self): + self._fail_during_consume('invalid - bad reset, no commit') + + def test_invalid_missing_key_after_reset(self): + self._fail_during_consume('invalid - missing key after reset') + + def test_invalid_reset_with_chunk(self): + self._fail_during_consume('invalid - reset with chunk') + + def test_invalid_commit_with_chunk(self): + self._fail_during_consume('invalid - commit with chunk') + + # Non-error cases + + _marker = object() + + def _match_results(self, testcase_name, expected_result=_marker): + import operator + key_func = 
operator.itemgetter('rk', 'fm', 'qual') + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + flattened = sorted(_flatten_cells(prd), key=key_func) + if expected_result is self._marker: + expected_result = sorted(results, key=key_func) + self.assertEqual(flattened, expected_result) + + def test_bare_commit_implies_ts_zero(self): + self._match_results('bare commit implies ts=0') + + def test_simple_row_with_timestamp(self): + self._match_results('simple row with timestamp') + + def test_missing_timestamp_implies_ts_zero(self): + self._match_results('missing timestamp, implied ts=0') + + def test_empty_cell_value(self): + self._match_results('empty cell value') + + def test_two_unsplit_cells(self): + self._match_results('two unsplit cells') + + def test_two_qualifiers(self): + self._match_results('two qualifiers') + + def test_two_families(self): + self._match_results('two families') + + def test_with_labels(self): + self._match_results('with labels') + + def test_split_cell_bare_commit(self): + self._match_results('split cell, bare commit') + + def test_split_cell(self): + self._match_results('split cell') + + def test_split_four_ways(self): + self._match_results('split four ways') + + def test_two_split_cells(self): + self._match_results('two split cells') + + def test_multi_qualifier_splits(self): + self._match_results('multi-qualifier splits') + + def test_multi_qualifier_multi_split(self): + self._match_results('multi-qualifier multi-split') + + def test_multi_family_split(self): + self._match_results('multi-family split') + + def test_two_rows(self): + self._match_results('two rows') + + def test_two_rows_implicit_timestamp(self): + self._match_results('two rows implicit timestamp') + + def test_two_rows_empty_value(self): + self._match_results('two rows empty value') + + def test_two_rows_one_with_multiple_cells(self): + self._match_results('two rows, one with multiple cells') + + def test_two_rows_multiple_cells_multiple_families(self): + self._match_results('two rows, multiple cells, multiple families') + + def test_two_rows_multiple_cells(self): + self._match_results('two rows, multiple cells') + + def test_two_rows_four_cells_two_labels(self): + self._match_results('two rows, four cells, 2 labels') + + def test_two_rows_with_splits_same_timestamp(self): + self._match_results('two rows with splits, same timestamp') + + def test_no_data_after_reset(self): + # JSON testcase has `"results": null` + self._match_results('no data after reset', expected_result=[]) + + def test_simple_reset(self): + self._match_results('simple reset') + + def test_reset_to_new_val(self): + self._match_results('reset to new val') + + def test_reset_to_new_qual(self): + self._match_results('reset to new qual') + + def test_reset_with_splits(self): + self._match_results('reset with splits') + + def test_two_resets(self): + self._match_results('two resets') + + def test_reset_to_new_row(self): + self._match_results('reset to new row') + + def test_reset_in_between_chunks(self): + self._match_results('reset in between chunks') + + def test_empty_cell_chunk(self): + self._match_results('empty cell chunk') + + +def _flatten_cells(prd): + # Match results format from JSON testcases. + # Doesn't handle error cases. 
+ from gcloud._helpers import _bytes_to_unicode + from gcloud._helpers import _microseconds_from_datetime + for row_key, row in prd.rows.items(): + for family_name, family in row.cells.items(): + for qualifier, column in family.items(): + for cell in column: + yield { + u'rk': _bytes_to_unicode(row_key), + u'fm': family_name, + u'qual': _bytes_to_unicode(qualifier), + u'ts': _microseconds_from_datetime(cell.timestamp), + u'value': _bytes_to_unicode(cell.value), + u'label': u' '.join(cell.labels), + u'error': False, + } + + class _MockCancellableIterator(object): cancel_calls = 0 @@ -524,6 +891,31 @@ def next(self): return next(self.iter_values) +class _Dummy(object): + + def __init__(self, **kw): + self.__dict__.update(kw) + + +class _PartialCellV2(object): + + row_key = '' + family_name = u'' + qualifier = b'' + timestamp_micros = 0 + + def __init__(self, **kw): + self.labels = kw.pop('labels', []) + self.__dict__.update(kw) + + +class _ReadRowsResponseV2(object): + + def __init__(self, chunks, last_scanned_row_key=''): + self.chunks = chunks + self.last_scanned_row_key = last_scanned_row_key + + def _generate_cell_chunks(chunk_text_pbs): from google.protobuf.text_format import Merge from gcloud.bigtable._generated_v2.bigtable_pb2 import ReadRowsResponse From ed8edb42a6c2aaaddfb9c929d56903a338d0fc27 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sun, 26 Jun 2016 16:34:28 -0400 Subject: [PATCH 019/103] Disable GAX bundling for 'Topic.publish'. Closes #1869. Works around https://github.com/googleapis/gax-python/issues/113. --- gcloud/pubsub/_gax.py | 8 ++++---- gcloud/pubsub/test__gax.py | 33 ++++----------------------------- 2 files changed, 8 insertions(+), 33 deletions(-) diff --git a/gcloud/pubsub/_gax.py b/gcloud/pubsub/_gax.py index 0639833feb73..28ac6c23e294 100644 --- a/gcloud/pubsub/_gax.py +++ b/gcloud/pubsub/_gax.py @@ -162,17 +162,17 @@ def topic_publish(self, topic_path, messages): :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ + options = CallOptions(is_bundling=False) message_pbs = [_message_pb_from_dict(message) for message in messages] try: - event = self._gax_api.publish(topic_path, message_pbs) - if not event.is_set(): - event.wait() + result = self._gax_api.publish(topic_path, message_pbs, + options=options) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise - return event.result.message_ids + return result.message_ids def topic_list_subscriptions(self, topic_path, page_size=0, page_token=None): diff --git a/gcloud/pubsub/test__gax.py b/gcloud/pubsub/test__gax.py index d285cb6e3260..2426d2dfb7e8 100644 --- a/gcloud/pubsub/test__gax.py +++ b/gcloud/pubsub/test__gax.py @@ -204,15 +204,12 @@ def test_topic_delete_error(self): def test_topic_publish_hit(self): import base64 - from gcloud._testing import _GAXBundlingEvent PAYLOAD = b'This is the message text' B64 = base64.b64encode(PAYLOAD).decode('ascii') MSGID = 'DEADBEEF' MESSAGE = {'data': B64, 'attributes': {}} response = _PublishResponsePB([MSGID]) - event = _GAXBundlingEvent(response) - event.wait() # already received result - gax_api = _GAXPublisherAPI(_publish_response=event) + gax_api = _GAXPublisherAPI(_publish_response=response) api = self._makeOne(gax_api) resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE]) @@ -223,29 +220,7 @@ def test_topic_publish_hit(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options, None) - - def 
test_topic_publish_hit_with_wait(self):
-        import base64
-        from gcloud._testing import _GAXBundlingEvent
-        PAYLOAD = b'This is the message text'
-        B64 = base64.b64encode(PAYLOAD).decode('ascii')
-        MSGID = 'DEADBEEF'
-        MESSAGE = {'data': B64, 'attributes': {}}
-        response = _PublishResponsePB([MSGID])
-        event = _GAXBundlingEvent(response)
-        gax_api = _GAXPublisherAPI(_publish_response=event)
-        api = self._makeOne(gax_api)
-
-        resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE])
-
-        self.assertEqual(resource, [MSGID])
-        topic_path, message_pbs, options = gax_api._publish_called_with
-        self.assertEqual(topic_path, self.TOPIC_PATH)
-        message_pb, = message_pbs
-        self.assertEqual(message_pb.data, B64)
-        self.assertEqual(message_pb.attributes, {})
-        self.assertEqual(options, None)
+        self.assertEqual(options.is_bundling, False)

     def test_topic_publish_miss_w_attrs_w_bytes_payload(self):
         import base64
@@ -264,7 +239,7 @@ def test_topic_publish_miss_w_attrs_w_bytes_payload(self):
         message_pb, = message_pbs
         self.assertEqual(message_pb.data, B64)
         self.assertEqual(message_pb.attributes, {'foo': 'bar'})
-        self.assertEqual(options, None)
+        self.assertEqual(options.is_bundling, False)

     def test_topic_publish_error(self):
         import base64
@@ -283,7 +258,7 @@ def test_topic_publish_error(self):
         message_pb, = message_pbs
         self.assertEqual(message_pb.data, B64)
         self.assertEqual(message_pb.attributes, {})
-        self.assertEqual(options, None)
+        self.assertEqual(options.is_bundling, False)

     def test_topic_list_subscriptions_no_paging(self):
         from google.gax import INITIAL_PAGE

From b4f4d188d6c87b80a75604394d3a154ee63cef3f Mon Sep 17 00:00:00 2001
From: Tres Seaver
Date: Sun, 26 Jun 2016 16:42:48 -0400
Subject: [PATCH 020/103] Drop '_GaxBundlingEvent' testing fossil.

---
 gcloud/_testing.py | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/gcloud/_testing.py b/gcloud/_testing.py
index 15ef5dd298e1..0a440e817436 100644
--- a/gcloud/_testing.py
+++ b/gcloud/_testing.py
@@ -59,17 +59,3 @@ def __init__(self, items, page_token):
     def next(self):
         items, self._items = self._items, None
         return items
-
-
-class _GAXBundlingEvent(object):
-
-    result = None
-
-    def __init__(self, result):
-        self._result = result
-
-    def is_set(self):
-        return self.result is not None
-
-    def wait(self, *_):
-        self.result = self._result

From 47b9246197092c507153dc777254eb0403ef1c40 Mon Sep 17 00:00:00 2001
From: Tres Seaver
Date: Sun, 26 Jun 2016 17:58:21 -0400
Subject: [PATCH 021/103] Alias Bigtable V1 imports / factories / entry point constants.

Indicate their V1 source in their names.

Preparatory to converting to V2 equivalents.
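The renaming in the diff below is mechanical; condensed here to one representative group, using only names that appear in it. Each V1 generated module keeps its import path but gains a version-suffixed alias, and the constants derived from it gain a matching ``_V1`` suffix so V2 names can coexist later:

    # Representative slice of the aliasing pattern applied in this patch.
    from gcloud.bigtable._generated import (
        bigtable_service_pb2 as data_service_v1_pb2)

    DATA_STUB_FACTORY_V1 = data_service_v1_pb2.beta_create_BigtableService_stub
    DATA_API_HOST_V1 = 'bigtable.googleapis.com'
    DATA_API_PORT_V1 = 443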
--- gcloud/bigtable/client.py | 76 ++++++---- gcloud/bigtable/cluster.py | 33 ++-- gcloud/bigtable/column_family.py | 42 +++--- gcloud/bigtable/row.py | 47 +++--- gcloud/bigtable/row_filters.py | 100 ++++++------ gcloud/bigtable/table.py | 25 +-- gcloud/bigtable/test_client.py | 82 +++++----- gcloud/bigtable/test_cluster.py | 112 +++++++------- gcloud/bigtable/test_column_family.py | 110 ++++++++------ gcloud/bigtable/test_row.py | 132 ++++++++-------- gcloud/bigtable/test_row_data.py | 105 +++++++------ gcloud/bigtable/test_row_filters.py | 209 +++++++++++++++----------- gcloud/bigtable/test_table.py | 90 +++++------ 13 files changed, 631 insertions(+), 532 deletions(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 49faf42c8a25..0b97922894e4 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -31,40 +31,52 @@ from grpc.beta import implementations -from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2 -from gcloud.bigtable._generated import bigtable_cluster_service_pb2 +# Cluster admin service is V1-only (V2 provides instance admin instead) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import bigtable_service_pb2 -from gcloud.bigtable._generated import bigtable_table_service_pb2 -from gcloud.bigtable._generated import operations_grpc_pb2 + bigtable_cluster_data_pb2 as cluster_data_v1_pb2) +from gcloud.bigtable._generated import ( + bigtable_cluster_service_pb2 as cluster_service_v1_pb2) +from gcloud.bigtable._generated import ( + bigtable_cluster_service_messages_pb2 as cluster_messages_v1_pb2) +# V1 table admin service +from gcloud.bigtable._generated import ( + bigtable_table_service_pb2 as table_service_v1_pb2) +# V1 data service +from gcloud.bigtable._generated import ( + bigtable_service_pb2 as data_service_v1_pb2) + +from gcloud.bigtable._generated import ( + operations_grpc_pb2 as operations_grpc_v1_pb2) + from gcloud.bigtable.cluster import Cluster from gcloud.client import _ClientFactoryMixin from gcloud.client import _ClientProjectMixin from gcloud.credentials import get_credentials -TABLE_STUB_FACTORY = ( - bigtable_table_service_pb2.beta_create_BigtableTableService_stub) -TABLE_ADMIN_HOST = 'bigtabletableadmin.googleapis.com' +TABLE_STUB_FACTORY_V1 = ( + table_service_v1_pb2.beta_create_BigtableTableService_stub) +TABLE_ADMIN_HOST_V1 = 'bigtabletableadmin.googleapis.com' """Table Admin API request host.""" -TABLE_ADMIN_PORT = 443 +TABLE_ADMIN_PORT_V1 = 443 """Table Admin API request port.""" -CLUSTER_STUB_FACTORY = ( - bigtable_cluster_service_pb2.beta_create_BigtableClusterService_stub) -CLUSTER_ADMIN_HOST = 'bigtableclusteradmin.googleapis.com' +CLUSTER_STUB_FACTORY_V1 = ( + cluster_service_v1_pb2.beta_create_BigtableClusterService_stub) +CLUSTER_ADMIN_HOST_V1 = 'bigtableclusteradmin.googleapis.com' """Cluster Admin API request host.""" -CLUSTER_ADMIN_PORT = 443 +CLUSTER_ADMIN_PORT_V1 = 443 """Cluster Admin API request port.""" -DATA_STUB_FACTORY = bigtable_service_pb2.beta_create_BigtableService_stub -DATA_API_HOST = 'bigtable.googleapis.com' +DATA_STUB_FACTORY_V1 = data_service_v1_pb2.beta_create_BigtableService_stub +DATA_API_HOST_V1 = 'bigtable.googleapis.com' """Data API request host.""" -DATA_API_PORT = 443 +DATA_API_PORT_V1 = 443 """Data API request port.""" -OPERATIONS_STUB_FACTORY = operations_grpc_pb2.beta_create_Operations_stub +OPERATIONS_STUB_FACTORY_V1 = operations_grpc_v1_pb2.beta_create_Operations_stub 
+OPERATIONS_API_HOST_V1 = CLUSTER_ADMIN_HOST_V1 +OPERATIONS_API_PORT_V1 = CLUSTER_ADMIN_PORT_V1 ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' """Scope for interacting with the Cluster Admin and Table Admin APIs.""" @@ -275,8 +287,8 @@ def _make_data_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, DATA_STUB_FACTORY, - DATA_API_HOST, DATA_API_PORT) + return _make_stub(self, DATA_STUB_FACTORY_V1, + DATA_API_HOST_V1, DATA_API_PORT_V1) def _make_cluster_stub(self): """Creates gRPC stub to make requests to the Cluster Admin API. @@ -284,8 +296,8 @@ def _make_cluster_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, CLUSTER_STUB_FACTORY, - CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT) + return _make_stub(self, CLUSTER_STUB_FACTORY_V1, + CLUSTER_ADMIN_HOST_V1, CLUSTER_ADMIN_PORT_V1) def _make_operations_stub(self): """Creates gRPC stub to make requests to the Operations API. @@ -296,8 +308,8 @@ def _make_operations_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, OPERATIONS_STUB_FACTORY, - CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT) + return _make_stub(self, OPERATIONS_STUB_FACTORY_V1, + OPERATIONS_API_HOST_V1, OPERATIONS_API_PORT_V1) def _make_table_stub(self): """Creates gRPC stub to make requests to the Table Admin API. @@ -305,8 +317,8 @@ def _make_table_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, TABLE_STUB_FACTORY, - TABLE_ADMIN_HOST, TABLE_ADMIN_PORT) + return _make_stub(self, TABLE_STUB_FACTORY_V1, + TABLE_ADMIN_HOST_V1, TABLE_ADMIN_PORT_V1) def is_started(self): """Check if the client has been started. @@ -401,14 +413,15 @@ def list_zones(self): :raises: :class:`ValueError ` if one of the zones is not in ``OK`` state. """ - request_pb = messages_pb2.ListZonesRequest(name=self.project_name) - # We expect a `.messages_pb2.ListZonesResponse` + request_pb = cluster_messages_v1_pb2.ListZonesRequest( + name=self.project_name) + # We expect a `.cluster_messages_v1_pb2.ListZonesResponse` list_zones_response = self._cluster_stub.ListZones( request_pb, self.timeout_seconds) result = [] for zone in list_zones_response.zones: - if zone.status != data_pb2.Zone.OK: + if zone.status != cluster_data_v1_pb2.Zone.OK: raise ValueError('Zone %s not in OK state' % ( zone.display_name,)) result.append(zone.display_name) @@ -422,8 +435,9 @@ def list_clusters(self): returned and the second is a list of strings (the failed zones in the request). 
""" - request_pb = messages_pb2.ListClustersRequest(name=self.project_name) - # We expect a `.messages_pb2.ListClustersResponse` + request_pb = cluster_messages_v1_pb2.ListClustersRequest( + name=self.project_name) + # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` list_clusters_response = self._cluster_stub.ListClusters( request_pb, self.timeout_seconds) diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 55e7a49b446d..28875730c292 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -20,11 +20,12 @@ from google.longrunning import operations_pb2 from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) +from gcloud.bigtable._generated import ( + bigtable_table_service_messages_pb2 as table_messages_v1_pb2) from gcloud.bigtable.table import Table @@ -40,9 +41,9 @@ _UPDATE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UpdateClusterMetadata' _UNDELETE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UndeleteClusterMetadata' _TYPE_URL_MAP = { - _CLUSTER_CREATE_METADATA: messages_pb2.CreateClusterMetadata, - _UPDATE_CREATE_METADATA: messages_pb2.UpdateClusterMetadata, - _UNDELETE_CREATE_METADATA: messages_pb2.UndeleteClusterMetadata, + _CLUSTER_CREATE_METADATA: messages_v1_pb2.CreateClusterMetadata, + _UPDATE_CREATE_METADATA: messages_v1_pb2.UpdateClusterMetadata, + _UNDELETE_CREATE_METADATA: messages_v1_pb2.UndeleteClusterMetadata, } DEFAULT_SERVE_NODES = 3 @@ -55,15 +56,15 @@ def _prepare_create_request(cluster): :type cluster: :class:`Cluster` :param cluster: The cluster to be created. - :rtype: :class:`.messages_pb2.CreateClusterRequest` + :rtype: :class:`.messages_v1_pb2.CreateClusterRequest` :returns: The CreateCluster request object containing the cluster info. """ zone_full_name = ('projects/' + cluster._client.project + '/zones/' + cluster.zone) - return messages_pb2.CreateClusterRequest( + return messages_v1_pb2.CreateClusterRequest( name=zone_full_name, cluster_id=cluster.cluster_id, - cluster=data_pb2.Cluster( + cluster=data_v1_pb2.Cluster( display_name=cluster.display_name, serve_nodes=cluster.serve_nodes, ), @@ -198,7 +199,7 @@ class Cluster(object): .. note:: For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_pb2.STORAGE_SSD`. + which if not sent will end up as :data:`.data_v1_pb2.STORAGE_SSD`. :type zone: str :param zone: The name of the zone where the cluster resides. @@ -332,7 +333,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this cluster.""" - request_pb = messages_pb2.GetClusterRequest(name=self.name) + request_pb = messages_v1_pb2.GetClusterRequest(name=self.name) # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. cluster_pb = self._client._cluster_stub.GetCluster( request_pb, self._client.timeout_seconds) @@ -389,7 +390,7 @@ def update(self): :returns: The long-running operation corresponding to the update operation. 
""" - request_pb = data_pb2.Cluster( + request_pb = data_v1_pb2.Cluster( name=self.name, display_name=self.display_name, serve_nodes=self.serve_nodes, @@ -426,7 +427,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_pb2.DeleteClusterRequest(name=self.name) + request_pb = messages_v1_pb2.DeleteClusterRequest(name=self.name) # We expect a `google.protobuf.empty_pb2.Empty` self._client._cluster_stub.DeleteCluster( request_pb, self._client.timeout_seconds) @@ -456,7 +457,7 @@ def undelete(self): :returns: The long-running operation corresponding to the undelete operation. """ - request_pb = messages_pb2.UndeleteClusterRequest(name=self.name) + request_pb = messages_v1_pb2.UndeleteClusterRequest(name=self.name) # We expect a `google.longrunning.operations_pb2.Operation`. operation_pb2 = self._client._cluster_stub.UndeleteCluster( request_pb, self._client.timeout_seconds) @@ -472,8 +473,8 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. """ - request_pb = table_messages_pb2.ListTablesRequest(name=self.name) - # We expect a `table_messages_pb2.ListTablesResponse` + request_pb = table_messages_v1_pb2.ListTablesRequest(name=self.name) + # We expect a `table_messages_v1_pb2.ListTablesResponse` table_list_pb = self._client._table_stub.ListTables( request_pb, self._client.timeout_seconds) diff --git a/gcloud/bigtable/column_family.py b/gcloud/bigtable/column_family.py index c0d9060316a4..5d17be804f9c 100644 --- a/gcloud/bigtable/column_family.py +++ b/gcloud/bigtable/column_family.py @@ -20,9 +20,10 @@ from google.protobuf import duration_pb2 from gcloud._helpers import _total_seconds -from gcloud.bigtable._generated import bigtable_table_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_table_data_pb2 as data_v1_pb2) +from gcloud.bigtable._generated import ( + bigtable_table_service_messages_pb2 as messages_v1_pb2) def _timedelta_to_duration_pb(timedelta_val): @@ -110,10 +111,10 @@ def __eq__(self, other): def to_pb(self): """Converts the garbage collection rule to a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.data_v1_pb2.GcRule` :returns: The converted current object. """ - return data_pb2.GcRule(max_num_versions=self.max_num_versions) + return data_v1_pb2.GcRule(max_num_versions=self.max_num_versions) class MaxAgeGCRule(GarbageCollectionRule): @@ -134,11 +135,11 @@ def __eq__(self, other): def to_pb(self): """Converts the garbage collection rule to a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.data_v1_pb2.GcRule` :returns: The converted current object. """ max_age = _timedelta_to_duration_pb(self.max_age) - return data_pb2.GcRule(max_age=max_age) + return data_v1_pb2.GcRule(max_age=max_age) class GCRuleUnion(GarbageCollectionRule): @@ -159,12 +160,12 @@ def __eq__(self, other): def to_pb(self): """Converts the union into a single GC rule as a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.data_v1_pb2.GcRule` :returns: The converted current object. 
""" - union = data_pb2.GcRule.Union( + union = data_v1_pb2.GcRule.Union( rules=[rule.to_pb() for rule in self.rules]) - return data_pb2.GcRule(union=union) + return data_v1_pb2.GcRule(union=union) class GCRuleIntersection(GarbageCollectionRule): @@ -185,12 +186,12 @@ def __eq__(self, other): def to_pb(self): """Converts the intersection into a single GC rule as a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.data_v1_pb2.GcRule` :returns: The converted current object. """ - intersection = data_pb2.GcRule.Intersection( + intersection = data_v1_pb2.GcRule.Intersection( rules=[rule.to_pb() for rule in self.rules]) - return data_pb2.GcRule(intersection=intersection) + return data_v1_pb2.GcRule(intersection=intersection) class ColumnFamily(object): @@ -250,16 +251,17 @@ def __ne__(self, other): def create(self): """Create this column family.""" if self.gc_rule is None: - column_family = data_pb2.ColumnFamily() + column_family = data_v1_pb2.ColumnFamily() else: - column_family = data_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) - request_pb = messages_pb2.CreateColumnFamilyRequest( + column_family = data_v1_pb2.ColumnFamily( + gc_rule=self.gc_rule.to_pb()) + request_pb = messages_v1_pb2.CreateColumnFamilyRequest( name=self._table.name, column_family_id=self.column_family_id, column_family=column_family, ) client = self._table._cluster._client - # We expect a `.data_pb2.ColumnFamily`. We ignore it since the only + # We expect a `.data_v1_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. client._table_stub.CreateColumnFamily(request_pb, @@ -276,9 +278,9 @@ def update(self): request_kwargs = {'name': self.name} if self.gc_rule is not None: request_kwargs['gc_rule'] = self.gc_rule.to_pb() - request_pb = data_pb2.ColumnFamily(**request_kwargs) + request_pb = data_v1_pb2.ColumnFamily(**request_kwargs) client = self._table._cluster._client - # We expect a `.data_pb2.ColumnFamily`. We ignore it since the only + # We expect a `.data_v1_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. client._table_stub.UpdateColumnFamily(request_pb, @@ -286,7 +288,7 @@ def update(self): def delete(self): """Delete this column family.""" - request_pb = messages_pb2.DeleteColumnFamilyRequest(name=self.name) + request_pb = messages_v1_pb2.DeleteColumnFamilyRequest(name=self.name) client = self._table._cluster._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.DeleteColumnFamily(request_pb, @@ -296,7 +298,7 @@ def delete(self): def _gc_rule_from_pb(gc_rule_pb): """Convert a protobuf GC rule to a native object. - :type gc_rule_pb: :class:`.data_pb2.GcRule` + :type gc_rule_pb: :class:`.data_v1_pb2.GcRule` :param gc_rule_pb: The GC rule to convert. 
:rtype: :class:`GarbageCollectionRule` or :data:`NoneType ` diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py index cb9ce2e67e3d..1dbd38aa7962 100644 --- a/gcloud/bigtable/row.py +++ b/gcloud/bigtable/row.py @@ -22,9 +22,10 @@ from gcloud._helpers import _datetime_from_microseconds from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) +from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) _PACK_I64 = struct.Struct('>q').pack @@ -133,13 +134,13 @@ def _set_cell(self, column_family_id, column, value, timestamp=None, # Truncate to millisecond granularity. timestamp_micros -= (timestamp_micros % 1000) - mutation_val = data_pb2.Mutation.SetCell( + mutation_val = data_v1_pb2.Mutation.SetCell( family_name=column_family_id, column_qualifier=column, timestamp_micros=timestamp_micros, value=value, ) - mutation_pb = data_pb2.Mutation(set_cell=mutation_val) + mutation_pb = data_v1_pb2.Mutation(set_cell=mutation_val) self._get_mutations(state).append(mutation_pb) def _delete(self, state=None): @@ -155,8 +156,8 @@ def _delete(self, state=None): :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. """ - mutation_val = data_pb2.Mutation.DeleteFromRow() - mutation_pb = data_pb2.Mutation(delete_from_row=mutation_val) + mutation_val = data_v1_pb2.Mutation.DeleteFromRow() + mutation_pb = data_v1_pb2.Mutation(delete_from_row=mutation_val) self._get_mutations(state).append(mutation_pb) def _delete_cells(self, column_family_id, columns, time_range=None, @@ -187,10 +188,10 @@ def _delete_cells(self, column_family_id, columns, time_range=None, """ mutations_list = self._get_mutations(state) if columns is self.ALL_COLUMNS: - mutation_val = data_pb2.Mutation.DeleteFromFamily( + mutation_val = data_v1_pb2.Mutation.DeleteFromFamily( family_name=column_family_id, ) - mutation_pb = data_pb2.Mutation(delete_from_family=mutation_val) + mutation_pb = data_v1_pb2.Mutation(delete_from_family=mutation_val) mutations_list.append(mutation_pb) else: delete_kwargs = {} @@ -206,9 +207,9 @@ def _delete_cells(self, column_family_id, columns, time_range=None, family_name=column_family_id, column_qualifier=column, ) - mutation_val = data_pb2.Mutation.DeleteFromColumn( + mutation_val = data_v1_pb2.Mutation.DeleteFromColumn( **delete_kwargs) - mutation_pb = data_pb2.Mutation( + mutation_pb = data_v1_pb2.Mutation( delete_from_column=mutation_val) to_append.append(mutation_pb) @@ -388,7 +389,7 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total mutations exceed the maximum allowable ' '%d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_pb2.MutateRowRequest( + request_pb = messages_v1_pb2.MutateRowRequest( table_name=self._table.name, row_key=self._row_key, mutations=mutations_list, @@ -503,14 +504,14 @@ def commit(self): 'mutations and %d false mutations.' 
% ( MAX_MUTATIONS, num_true_mutations, num_false_mutations)) - request_pb = messages_pb2.CheckAndMutateRowRequest( + request_pb = messages_v1_pb2.CheckAndMutateRowRequest( table_name=self._table.name, row_key=self._row_key, predicate_filter=self._filter.to_pb(), true_mutations=true_mutations, false_mutations=false_mutations, ) - # We expect a `.messages_pb2.CheckAndMutateRowResponse` + # We expect a `.messages_v1_pb2.CheckAndMutateRowResponse` client = self._table._cluster._client resp = client._data_stub.CheckAndMutateRow( request_pb, client.timeout_seconds) @@ -700,9 +701,10 @@ def append_cell_value(self, column_family_id, column, value): """ column = _to_bytes(column) value = _to_bytes(value) - rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id, - column_qualifier=column, - append_value=value) + rule_pb = data_v1_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + append_value=value) self._rule_pb_list.append(rule_pb) def increment_cell_value(self, column_family_id, column, int_value): @@ -736,9 +738,10 @@ def increment_cell_value(self, column_family_id, column, int_value): will fail. """ column = _to_bytes(column) - rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value) + rule_pb = data_v1_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value) self._rule_pb_list.append(rule_pb) def commit(self): @@ -791,12 +794,12 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total append mutations exceed the maximum ' 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_pb2.ReadModifyWriteRowRequest( + request_pb = messages_v1_pb2.ReadModifyWriteRowRequest( table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list, ) - # We expect a `.data_pb2.Row` + # We expect a `.data_v1_pb2.Row` client = self._table._cluster._client row_response = client._data_stub.ReadModifyWriteRow( request_pb, client.timeout_seconds) @@ -811,7 +814,7 @@ def commit(self): def _parse_rmw_row_response(row_response): """Parses the response to a ``ReadModifyWriteRow`` request. - :type row_response: :class:`.data_pb2.Row` + :type row_response: :class:`.data_v1_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. diff --git a/gcloud/bigtable/row_filters.py b/gcloud/bigtable/row_filters.py index b7a1388b3a09..2b11a06bfdd9 100644 --- a/gcloud/bigtable/row_filters.py +++ b/gcloud/bigtable/row_filters.py @@ -17,7 +17,8 @@ from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 +from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) class RowFilter(object): @@ -65,10 +66,10 @@ class SinkFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(sink=self.flag) + return data_v1_pb2.RowFilter(sink=self.flag) class PassAllFilter(_BoolFilter): @@ -83,10 +84,10 @@ class PassAllFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. 
""" - return data_pb2.RowFilter(pass_all_filter=self.flag) + return data_v1_pb2.RowFilter(pass_all_filter=self.flag) class BlockAllFilter(_BoolFilter): @@ -100,10 +101,10 @@ class BlockAllFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(block_all_filter=self.flag) + return data_v1_pb2.RowFilter(block_all_filter=self.flag) class _RegexFilter(RowFilter): @@ -153,10 +154,10 @@ class RowKeyRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(row_key_regex_filter=self.regex) + return data_v1_pb2.RowFilter(row_key_regex_filter=self.regex) class RowSampleFilter(RowFilter): @@ -178,10 +179,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(row_sample_filter=self.sample) + return data_v1_pb2.RowFilter(row_sample_filter=self.sample) class FamilyNameRegexFilter(_RegexFilter): @@ -202,10 +203,10 @@ class FamilyNameRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(family_name_regex_filter=self.regex) + return data_v1_pb2.RowFilter(family_name_regex_filter=self.regex) class ColumnQualifierRegexFilter(_RegexFilter): @@ -232,10 +233,10 @@ class ColumnQualifierRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(column_qualifier_regex_filter=self.regex) + return data_v1_pb2.RowFilter(column_qualifier_regex_filter=self.regex) class TimestampRange(object): @@ -266,7 +267,7 @@ def __ne__(self, other): def to_pb(self): """Converts the :class:`TimestampRange` to a protobuf. - :rtype: :class:`.data_pb2.TimestampRange` + :rtype: :class:`.data_v1_pb2.TimestampRange` :returns: The converted current object. """ timestamp_range_kwargs = {} @@ -276,7 +277,7 @@ def to_pb(self): if self.end is not None: timestamp_range_kwargs['end_timestamp_micros'] = ( _microseconds_from_datetime(self.end)) - return data_pb2.TimestampRange(**timestamp_range_kwargs) + return data_v1_pb2.TimestampRange(**timestamp_range_kwargs) class TimestampRangeFilter(RowFilter): @@ -300,10 +301,11 @@ def to_pb(self): First converts the ``range_`` on the current object to a protobuf and then uses it in the ``timestamp_range_filter`` field. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) + return data_v1_pb2.RowFilter( + timestamp_range_filter=self.range_.to_pb()) class ColumnRangeFilter(RowFilter): @@ -375,10 +377,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - First converts to a :class:`.data_pb2.ColumnRange` and then uses it + First converts to a :class:`.data_v1_pb2.ColumnRange` and then uses it in the ``column_range_filter`` field. 
- :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ column_range_kwargs = {'family_name': self.column_family_id} @@ -395,8 +397,8 @@ def to_pb(self): key = 'end_qualifier_exclusive' column_range_kwargs[key] = _to_bytes(self.end_column) - column_range = data_pb2.ColumnRange(**column_range_kwargs) - return data_pb2.RowFilter(column_range_filter=column_range) + column_range = data_v1_pb2.ColumnRange(**column_range_kwargs) + return data_v1_pb2.RowFilter(column_range_filter=column_range) class ValueRegexFilter(_RegexFilter): @@ -423,10 +425,10 @@ class ValueRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(value_regex_filter=self.regex) + return data_v1_pb2.RowFilter(value_regex_filter=self.regex) class ValueRangeFilter(RowFilter): @@ -492,10 +494,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - First converts to a :class:`.data_pb2.ValueRange` and then uses + First converts to a :class:`.data_v1_pb2.ValueRange` and then uses it to create a row filter protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ value_range_kwargs = {} @@ -512,8 +514,8 @@ def to_pb(self): key = 'end_value_exclusive' value_range_kwargs[key] = _to_bytes(self.end_value) - value_range = data_pb2.ValueRange(**value_range_kwargs) - return data_pb2.RowFilter(value_range_filter=value_range) + value_range = data_v1_pb2.ValueRange(**value_range_kwargs) + return data_v1_pb2.RowFilter(value_range_filter=value_range) class _CellCountFilter(RowFilter): @@ -545,10 +547,11 @@ class CellsRowOffsetFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) + return data_v1_pb2.RowFilter( + cells_per_row_offset_filter=self.num_cells) class CellsRowLimitFilter(_CellCountFilter): @@ -561,10 +564,10 @@ class CellsRowLimitFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) + return data_v1_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) class CellsColumnLimitFilter(_CellCountFilter): @@ -579,10 +582,11 @@ class CellsColumnLimitFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) + return data_v1_pb2.RowFilter( + cells_per_column_limit_filter=self.num_cells) class StripValueTransformerFilter(_BoolFilter): @@ -597,10 +601,10 @@ class StripValueTransformerFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. 
""" - return data_pb2.RowFilter(strip_value_transformer=self.flag) + return data_v1_pb2.RowFilter(strip_value_transformer=self.flag) class ApplyLabelFilter(RowFilter): @@ -633,10 +637,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(apply_label_transformer=self.label) + return data_v1_pb2.RowFilter(apply_label_transformer=self.label) class _FilterCombination(RowFilter): @@ -675,12 +679,12 @@ class RowFilterChain(_FilterCombination): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - chain = data_pb2.RowFilter.Chain( + chain = data_v1_pb2.RowFilter.Chain( filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_pb2.RowFilter(chain=chain) + return data_v1_pb2.RowFilter(chain=chain) class RowFilterUnion(_FilterCombination): @@ -699,12 +703,12 @@ class RowFilterUnion(_FilterCombination): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ - interleave = data_pb2.RowFilter.Interleave( + interleave = data_v1_pb2.RowFilter.Interleave( filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_pb2.RowFilter(interleave=interleave) + return data_v1_pb2.RowFilter(interleave=interleave) class ConditionalRowFilter(RowFilter): @@ -752,7 +756,7 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v1_pb2.RowFilter` :returns: The converted current object. """ condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} @@ -760,5 +764,5 @@ def to_pb(self): condition_kwargs['true_filter'] = self.true_filter.to_pb() if self.false_filter is not None: condition_kwargs['false_filter'] = self.false_filter.to_pb() - condition = data_pb2.RowFilter.Condition(**condition_kwargs) - return data_pb2.RowFilter(condition=condition) + condition = data_v1_pb2.RowFilter.Condition(**condition_kwargs) + return data_v1_pb2.RowFilter(condition=condition) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index c619b7145d71..155b5123c67f 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -16,11 +16,12 @@ from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as data_messages_pb2) + bigtable_table_service_messages_pb2 as messages_v1_pb2) +from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as data_messages_v1_pb2) from gcloud.bigtable.column_family import _gc_rule_from_pb from gcloud.bigtable.column_family import ColumnFamily from gcloud.bigtable.row import AppendRow @@ -167,7 +168,7 @@ def create(self, initial_split_keys=None): created, spanning the key ranges: ``[, s1)``, ``[s1, s2)``, ``[s2, )``. 
""" - request_pb = messages_pb2.CreateTableRequest( + request_pb = messages_v1_pb2.CreateTableRequest( initial_split_keys=initial_split_keys or [], name=self._cluster.name, table_id=self.table_id, @@ -178,7 +179,7 @@ def create(self, initial_split_keys=None): def delete(self): """Delete this table.""" - request_pb = messages_pb2.DeleteTableRequest(name=self.name) + request_pb = messages_v1_pb2.DeleteTableRequest(name=self.name) client = self._cluster._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.DeleteTable(request_pb, client.timeout_seconds) @@ -194,7 +195,7 @@ def list_column_families(self): family name from the response does not agree with the computed name from the column family ID. """ - request_pb = messages_pb2.GetTableRequest(name=self.name) + request_pb = messages_v1_pb2.GetTableRequest(name=self.name) client = self._cluster._client # We expect a `._generated.bigtable_table_data_pb2.Table` table_pb = client._table_stub.GetTable(request_pb, @@ -233,7 +234,7 @@ def read_row(self, row_key, filter_=None): client = self._cluster._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` + # We expect an iterator of `data_messages_v1_pb2.ReadRowsResponse` result = PartialRowData(row_key) for read_rows_response in response_iterator: result.update_from_read_rows(read_rows_response) @@ -296,7 +297,7 @@ def read_rows(self, start_key=None, end_key=None, client = self._cluster._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` + # We expect an iterator of `data_messages_v1_pb2.ReadRowsResponse` return PartialRowsData(response_iterator) def sample_row_keys(self): @@ -330,7 +331,7 @@ def sample_row_keys(self): or by casting to a :class:`list` and can be cancelled by calling ``cancel()``. """ - request_pb = data_messages_pb2.SampleRowKeysRequest( + request_pb = data_messages_v1_pb2.SampleRowKeysRequest( table_name=self.name) client = self._cluster._client response_iterator = client._data_stub.SampleRowKeys( @@ -383,7 +384,7 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, more than N rows. However, only N ``commit_row`` chunks will be sent. - :rtype: :class:`data_messages_pb2.ReadRowsRequest` + :rtype: :class:`data_messages_v1_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
:raises: :class:`ValueError ` if both ``row_key`` and one of ``start_key`` and ``end_key`` are set @@ -401,7 +402,7 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, range_kwargs['start_key'] = _to_bytes(start_key) if end_key is not None: range_kwargs['end_key'] = _to_bytes(end_key) - row_range = data_pb2.RowRange(**range_kwargs) + row_range = data_v1_pb2.RowRange(**range_kwargs) request_kwargs['row_range'] = row_range if filter_ is not None: request_kwargs['filter'] = filter_.to_pb() @@ -410,4 +411,4 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, if limit is not None: request_kwargs['num_rows_limit'] = limit - return data_messages_pb2.ReadRowsRequest(**request_kwargs) + return data_messages_v1_pb2.ReadRowsRequest(**request_kwargs) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index bb424ad259b1..ccd276fdb750 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -289,9 +289,9 @@ def test_table_stub_unset_failure(self): def test__make_data_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import DATA_API_HOST - from gcloud.bigtable.client import DATA_API_PORT - from gcloud.bigtable.client import DATA_STUB_FACTORY + from gcloud.bigtable.client import DATA_API_HOST_V1 + from gcloud.bigtable.client import DATA_API_PORT_V1 + from gcloud.bigtable.client import DATA_STUB_FACTORY_V1 credentials = _Credentials() project = 'PROJECT' @@ -311,18 +311,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - DATA_STUB_FACTORY, - DATA_API_HOST, - DATA_API_PORT, + DATA_STUB_FACTORY_V1, + DATA_API_HOST_V1, + DATA_API_PORT_V1, ), ]) def test__make_cluster_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT - from gcloud.bigtable.client import CLUSTER_STUB_FACTORY + from gcloud.bigtable.client import CLUSTER_ADMIN_HOST_V1 + from gcloud.bigtable.client import CLUSTER_ADMIN_PORT_V1 + from gcloud.bigtable.client import CLUSTER_STUB_FACTORY_V1 credentials = _Credentials() project = 'PROJECT' @@ -342,18 +342,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - CLUSTER_STUB_FACTORY, - CLUSTER_ADMIN_HOST, - CLUSTER_ADMIN_PORT, + CLUSTER_STUB_FACTORY_V1, + CLUSTER_ADMIN_HOST_V1, + CLUSTER_ADMIN_PORT_V1, ), ]) def test__make_operations_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT - from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY + from gcloud.bigtable.client import OPERATIONS_API_HOST_V1 + from gcloud.bigtable.client import OPERATIONS_API_PORT_V1 + from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V1 credentials = _Credentials() project = 'PROJECT' @@ -373,18 +373,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - OPERATIONS_STUB_FACTORY, - CLUSTER_ADMIN_HOST, - CLUSTER_ADMIN_PORT, + OPERATIONS_STUB_FACTORY_V1, + OPERATIONS_API_HOST_V1, + OPERATIONS_API_PORT_V1, ), ]) def test__make_table_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import TABLE_ADMIN_HOST - from gcloud.bigtable.client import TABLE_ADMIN_PORT - from gcloud.bigtable.client import 
TABLE_STUB_FACTORY + from gcloud.bigtable.client import TABLE_ADMIN_HOST_V1 + from gcloud.bigtable.client import TABLE_ADMIN_PORT_V1 + from gcloud.bigtable.client import TABLE_STUB_FACTORY_V1 credentials = _Credentials() project = 'PROJECT' @@ -404,9 +404,9 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - TABLE_STUB_FACTORY, - TABLE_ADMIN_HOST, - TABLE_ADMIN_PORT, + TABLE_STUB_FACTORY_V1, + TABLE_ADMIN_HOST_V1, + TABLE_ADMIN_PORT_V1, ), ]) @@ -543,9 +543,9 @@ def test_cluster_factory(self): def _list_zones_helper(self, zone_status): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub credentials = _Credentials() @@ -555,17 +555,17 @@ def _list_zones_helper(self, zone_status): admin=True, timeout_seconds=timeout_seconds) # Create request_pb - request_pb = messages_pb2.ListZonesRequest( + request_pb = messages_v1_pb2.ListZonesRequest( name='projects/' + project, ) # Create response_pb zone1 = 'foo' zone2 = 'bar' - response_pb = messages_pb2.ListZonesResponse( + response_pb = messages_v1_pb2.ListZonesResponse( zones=[ - data_pb2.Zone(display_name=zone1, status=zone_status), - data_pb2.Zone(display_name=zone2, status=zone_status), + data_v1_pb2.Zone(display_name=zone1, status=zone_status), + data_v1_pb2.Zone(display_name=zone2, status=zone_status), ], ) @@ -586,20 +586,20 @@ def _list_zones_helper(self, zone_status): def test_list_zones(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - self._list_zones_helper(data_pb2.Zone.OK) + bigtable_cluster_data_pb2 as data_v1_pb2) + self._list_zones_helper(data_v1_pb2.Zone.OK) def test_list_zones_failure(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) with self.assertRaises(ValueError): - self._list_zones_helper(data_pb2.Zone.EMERGENCY_MAINENANCE) + self._list_zones_helper(data_v1_pb2.Zone.EMERGENCY_MAINENANCE) def test_list_clusters(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub credentials = _Credentials() @@ -609,7 +609,7 @@ def test_list_clusters(self): admin=True, timeout_seconds=timeout_seconds) # Create request_pb - request_pb = messages_pb2.ListClustersRequest( + request_pb = messages_v1_pb2.ListClustersRequest( name='projects/' + project, ) @@ -622,17 +622,17 @@ def test_list_clusters(self): '/clusters/' + cluster_id1) cluster_name2 = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id2) - response_pb = messages_pb2.ListClustersResponse( + response_pb = messages_v1_pb2.ListClustersResponse( failed_zones=[ - data_pb2.Zone(display_name=failed_zone), + data_v1_pb2.Zone(display_name=failed_zone), ], clusters=[ - data_pb2.Cluster( + data_v1_pb2.Cluster( name=cluster_name1, display_name=cluster_name1, serve_nodes=3, ), - data_pb2.Cluster( + data_v1_pb2.Cluster( name=cluster_name2, display_name=cluster_name2, serve_nodes=3, diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index 
427a4ec9126b..294f9a0d0f55 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -209,12 +209,12 @@ def test_table_factory(self): def test__update_from_pb_success(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES display_name = 'display_name' serve_nodes = 8 - cluster_pb = data_pb2.Cluster( + cluster_pb = data_v1_pb2.Cluster( display_name=display_name, serve_nodes=serve_nodes, ) @@ -228,10 +228,10 @@ def test__update_from_pb_success(self): def test__update_from_pb_no_display_name(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - cluster_pb = data_pb2.Cluster(serve_nodes=331) + cluster_pb = data_v1_pb2.Cluster(serve_nodes=331) cluster = self._makeOne(None, None, None) self.assertEqual(cluster.display_name, None) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) @@ -242,10 +242,10 @@ def test__update_from_pb_no_display_name(self): def test__update_from_pb_no_serve_nodes(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - cluster_pb = data_pb2.Cluster(display_name='name') + cluster_pb = data_v1_pb2.Cluster(display_name='name') cluster = self._makeOne(None, None, None) self.assertEqual(cluster.display_name, None) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) @@ -256,7 +256,7 @@ def test__update_from_pb_no_serve_nodes(self): def test_from_pb_success(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) project = 'PROJECT' zone = 'zone' @@ -265,7 +265,7 @@ def test_from_pb_success(self): cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - cluster_pb = data_pb2.Cluster( + cluster_pb = data_v1_pb2.Cluster( name=cluster_name, display_name=cluster_id, serve_nodes=331, @@ -280,10 +280,10 @@ def test_from_pb_success(self): def test_from_pb_bad_cluster_name(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) cluster_name = 'INCORRECT_FORMAT' - cluster_pb = data_pb2.Cluster(name=cluster_name) + cluster_pb = data_v1_pb2.Cluster(name=cluster_name) klass = self._getTargetClass() with self.assertRaises(ValueError): @@ -291,7 +291,7 @@ def test_from_pb_bad_cluster_name(self): def test_from_pb_project_mistmatch(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) project = 'PROJECT' zone = 'zone' @@ -303,7 +303,7 @@ def test_from_pb_project_mistmatch(self): cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - cluster_pb = data_pb2.Cluster(name=cluster_name) + cluster_pb = data_v1_pb2.Cluster(name=cluster_name) klass = self._getTargetClass() with self.assertRaises(ValueError): @@ -349,9 +349,9 @@ def test___ne__(self): def test_reload(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from 
gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES @@ -366,12 +366,12 @@ def test_reload(self): # Create request_pb cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - request_pb = messages_pb2.GetClusterRequest(name=cluster_name) + request_pb = messages_v1_pb2.GetClusterRequest(name=cluster_name) # Create response_pb serve_nodes = 31 display_name = u'hey-hi-hello' - response_pb = data_pb2.Cluster( + response_pb = data_v1_pb2.Cluster( display_name=display_name, serve_nodes=serve_nodes, ) @@ -403,7 +403,7 @@ def test_create(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT @@ -425,7 +425,7 @@ def test_create(self): op_name = ('operations/projects/%s/zones/%s/clusters/%s/' 'operations/%d' % (project, zone, cluster_id, op_id)) current_op = operations_pb2.Operation(name=op_name) - response_pb = data_pb2.Cluster(current_operation=current_op) + response_pb = data_v1_pb2.Cluster(current_operation=current_op) # Patch the stub used by the API method. client._cluster_stub = stub = _FakeStub(response_pb) @@ -465,7 +465,7 @@ def test_update(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT @@ -484,7 +484,7 @@ def test_update(self): # Create request_pb cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - request_pb = data_pb2.Cluster( + request_pb = data_v1_pb2.Cluster( name=cluster_name, display_name=display_name, serve_nodes=serve_nodes, @@ -492,7 +492,7 @@ def test_update(self): # Create response_pb current_op = operations_pb2.Operation() - response_pb = data_pb2.Cluster(current_operation=current_op) + response_pb = data_v1_pb2.Cluster(current_operation=current_op) # Patch the stub used by the API method. 
client._cluster_stub = stub = _FakeStub(response_pb) @@ -525,7 +525,7 @@ def mock_process_operation(operation_pb): def test_delete(self): from google.protobuf import empty_pb2 from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project = 'PROJECT' @@ -539,7 +539,7 @@ def test_delete(self): # Create request_pb cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - request_pb = messages_pb2.DeleteClusterRequest(name=cluster_name) + request_pb = messages_v1_pb2.DeleteClusterRequest(name=cluster_name) # Create response_pb response_pb = empty_pb2.Empty() @@ -564,7 +564,7 @@ def test_undelete(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT @@ -579,7 +579,7 @@ def test_undelete(self): # Create request_pb cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - request_pb = messages_pb2.UndeleteClusterRequest(name=cluster_name) + request_pb = messages_v1_pb2.UndeleteClusterRequest(name=cluster_name) # Create response_pb response_pb = operations_pb2.Operation() @@ -616,7 +616,7 @@ def _list_tables_helper(self, table_id, table_name=None): from gcloud.bigtable._generated import ( bigtable_table_data_pb2 as table_data_pb2) from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_pb2) + bigtable_table_service_messages_pb2 as table_messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project = 'PROJECT' @@ -630,11 +630,12 @@ def _list_tables_helper(self, table_id, table_name=None): # Create request_ cluster_name = ('projects/' + project + '/zones/' + zone + '/clusters/' + cluster_id) - request_pb = table_messages_pb2.ListTablesRequest(name=cluster_name) + request_pb = table_messages_v1_pb2.ListTablesRequest( + name=cluster_name) # Create response_pb table_name = table_name or (cluster_name + '/tables/' + table_id) - response_pb = table_messages_pb2.ListTablesResponse( + response_pb = table_messages_v1_pb2.ListTablesResponse( tables=[ table_data_pb2.Table(name=table_name), ], @@ -686,9 +687,9 @@ def _callFUT(self, cluster): def test_it(self): from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.cluster import Cluster project = 'PROJECT' @@ -702,11 +703,11 @@ def test_it(self): display_name=display_name, serve_nodes=serve_nodes) request_pb = self._callFUT(cluster) self.assertTrue(isinstance(request_pb, - messages_pb2.CreateClusterRequest)) + messages_v1_pb2.CreateClusterRequest)) self.assertEqual(request_pb.cluster_id, cluster_id) self.assertEqual(request_pb.name, 'projects/' + project + '/zones/' + zone) - self.assertTrue(isinstance(request_pb.cluster, data_pb2.Cluster)) + self.assertTrue(isinstance(request_pb.cluster, data_v1_pb2.Cluster)) self.assertEqual(request_pb.cluster.display_name, display_name) self.assertEqual(request_pb.cluster.serve_nodes, serve_nodes) @@ -720,13 +721,14 @@ def _callFUT(self, 
any_val, expected_type=None): def test_with_known_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable import cluster as MUT - type_url = 'type.googleapis.com/' + data_pb2._CELL.full_name - fake_type_url_map = {type_url: data_pb2.Cell} + type_url = 'type.googleapis.com/' + data_v1_pb2._CELL.full_name + fake_type_url_map = {type_url: data_v1_pb2.Cell} - cell = data_pb2.Cell( + cell = data_v1_pb2.Cell( timestamp_micros=0, value=b'foobar', ) @@ -743,19 +745,19 @@ def test_with_create_cluster_metadata(self): from google.protobuf import any_pb2 from google.protobuf.timestamp_pb2 import Timestamp from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) type_url = ('type.googleapis.com/' + - messages_pb2._CREATECLUSTERMETADATA.full_name) - metadata = messages_pb2.CreateClusterMetadata( + messages_v1_pb2._CREATECLUSTERMETADATA.full_name) + metadata = messages_v1_pb2.CreateClusterMetadata( request_time=Timestamp(seconds=1, nanos=1234), finish_time=Timestamp(seconds=10, nanos=891011), - original_request=messages_pb2.CreateClusterRequest( + original_request=messages_v1_pb2.CreateClusterRequest( name='foo', cluster_id='bar', - cluster=data_pb2.Cluster( + cluster=data_v1_pb2.Cluster( display_name='quux', serve_nodes=1337, ), @@ -773,17 +775,17 @@ def test_with_update_cluster_metadata(self): from google.protobuf import any_pb2 from google.protobuf.timestamp_pb2 import Timestamp from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) type_url = ('type.googleapis.com/' + - messages_pb2._UPDATECLUSTERMETADATA.full_name) - metadata = messages_pb2.UpdateClusterMetadata( + messages_v1_pb2._UPDATECLUSTERMETADATA.full_name) + metadata = messages_v1_pb2.UpdateClusterMetadata( request_time=Timestamp(seconds=1, nanos=1234), finish_time=Timestamp(seconds=10, nanos=891011), cancel_time=Timestamp(seconds=100, nanos=76543), - original_request=data_pb2.Cluster( + original_request=data_v1_pb2.Cluster( display_name='the-end', serve_nodes=42, ), @@ -800,13 +802,11 @@ def test_with_undelete_cluster_metadata(self): from google.protobuf import any_pb2 from google.protobuf.timestamp_pb2 import Timestamp from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) type_url = ('type.googleapis.com/' + - messages_pb2._UNDELETECLUSTERMETADATA.full_name) - metadata = messages_pb2.UndeleteClusterMetadata( + messages_v1_pb2._UNDELETECLUSTERMETADATA.full_name) + metadata = messages_v1_pb2.UndeleteClusterMetadata( request_time=Timestamp(seconds=1, nanos=1234), finish_time=Timestamp(seconds=10, nanos=891011), ) @@ -853,7 +853,7 @@ def test_it(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 
as messages_pb2) + bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable import cluster as MUT project = 'PROJECT' @@ -867,7 +867,7 @@ def test_it(self): current_op = operations_pb2.Operation(name=operation_name) # Create mocks. - request_metadata = messages_pb2.CreateClusterMetadata() + request_metadata = messages_v1_pb2.CreateClusterMetadata() parse_pb_any_called = [] def mock_parse_pb_any_to_native(any_val, expected_type=None): @@ -897,10 +897,10 @@ def mock_pb_timestamp_to_datetime(timestamp): def test_op_name_parsing_failure(self): from google.longrunning import operations_pb2 from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + bigtable_cluster_data_pb2 as data_v1_pb2) current_op = operations_pb2.Operation(name='invalid') - cluster = data_pb2.Cluster(current_operation=current_op) + cluster = data_v1_pb2.Cluster(current_operation=current_op) with self.assertRaises(ValueError): self._callFUT(cluster) diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py index 139a959e0a7b..00384a3df0df 100644 --- a/gcloud/bigtable/test_column_family.py +++ b/gcloud/bigtable/test_column_family.py @@ -108,12 +108,12 @@ def test___ne__same_value(self): def test_to_pb(self): from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) max_num_versions = 1337 gc_rule = self._makeOne(max_num_versions=max_num_versions) pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, - data_pb2.GcRule(max_num_versions=max_num_versions)) + expected = data_v1_pb2.GcRule(max_num_versions=max_num_versions) + self.assertEqual(pb_val, expected) class TestMaxAgeGCRule(unittest2.TestCase): @@ -148,13 +148,13 @@ def test_to_pb(self): import datetime from google.protobuf import duration_pb2 from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) max_age = datetime.timedelta(seconds=1) duration = duration_pb2.Duration(seconds=1) gc_rule = self._makeOne(max_age=max_age) pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, data_pb2.GcRule(max_age=duration)) + self.assertEqual(pb_val, data_v1_pb2.GcRule(max_age=duration)) class TestGCRuleUnion(unittest2.TestCase): @@ -194,21 +194,22 @@ def test_to_pb(self): import datetime from google.protobuf import duration_pb2 from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions) + pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = data_v1_pb2.GcRule( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) + pb_rule3 = data_v1_pb2.GcRule( + union=data_v1_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() self.assertEqual(gc_rule_pb, pb_rule3) @@ -217,29 +218,30 @@ def test_to_pb_nested(self): import datetime from google.protobuf import duration_pb2 from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from 
gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1) + pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions1) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = data_v1_pb2.GcRule( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) + pb_rule3 = data_v1_pb2.GcRule( + union=data_v1_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2) + pb_rule4 = data_v1_pb2.GcRule(max_num_versions=max_num_versions2) rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule3, pb_rule4])) + pb_rule5 = data_v1_pb2.GcRule( + union=data_v1_pb2.GcRule.Union(rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() self.assertEqual(gc_rule_pb, pb_rule5) @@ -282,21 +284,22 @@ def test_to_pb(self): import datetime from google.protobuf import duration_pb2 from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions) + pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = data_v1_pb2.GcRule( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( + pb_rule3 = data_v1_pb2.GcRule( + intersection=data_v1_pb2.GcRule.Intersection( rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() @@ -306,30 +309,31 @@ def test_to_pb_nested(self): import datetime from google.protobuf import duration_pb2 from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1) + pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions1) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = data_v1_pb2.GcRule( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( + pb_rule3 = data_v1_pb2.GcRule( + intersection=data_v1_pb2.GcRule.Intersection( rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2) + pb_rule4 = data_v1_pb2.GcRule(max_num_versions=max_num_versions2) rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_pb2.GcRule( - 
intersection=data_pb2.GcRule.Intersection( + pb_rule5 = data_v1_pb2.GcRule( + intersection=data_v1_pb2.GcRule.Intersection( rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() @@ -349,7 +353,8 @@ def test_constructor(self): column_family_id = u'column-family-id' table = object() gc_rule = object() - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) self.assertEqual(column_family.column_family_id, column_family_id) self.assertTrue(column_family._table is table) @@ -397,9 +402,9 @@ def test___ne__(self): def _create_test_helper(self, gc_rule=None): from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_table_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -413,21 +418,23 @@ def _create_test_helper(self, gc_rule=None): client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) # Create request_pb if gc_rule is None: - column_family_pb = data_pb2.ColumnFamily() + column_family_pb = data_v1_pb2.ColumnFamily() else: - column_family_pb = data_pb2.ColumnFamily(gc_rule=gc_rule.to_pb()) - request_pb = messages_pb2.CreateColumnFamilyRequest( + column_family_pb = data_v1_pb2.ColumnFamily( + gc_rule=gc_rule.to_pb()) + request_pb = messages_v1_pb2.CreateColumnFamilyRequest( name=table_name, column_family_id=column_family_id, column_family=column_family_pb, ) # Create response_pb - response_pb = data_pb2.ColumnFamily() + response_pb = data_v1_pb2.ColumnFamily() # Patch the stub used by the API method. client._table_stub = stub = _FakeStub(response_pb) @@ -456,7 +463,7 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -467,23 +474,25 @@ def _update_test_helper(self, gc_rule=None): timeout_seconds = 28 table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = table_name + '/columnFamilies/' + column_family_id + column_family_name = ( + table_name + '/columnFamilies/' + column_family_id) client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) # Create request_pb if gc_rule is None: - request_pb = data_pb2.ColumnFamily(name=column_family_name) + request_pb = data_v1_pb2.ColumnFamily(name=column_family_name) else: - request_pb = data_pb2.ColumnFamily( + request_pb = data_v1_pb2.ColumnFamily( name=column_family_name, gc_rule=gc_rule.to_pb(), ) # Create response_pb - response_pb = data_pb2.ColumnFamily() + response_pb = data_v1_pb2.ColumnFamily() # Patch the stub used by the API method. 
client._table_stub = stub = _FakeStub(response_pb) @@ -513,7 +522,7 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_table_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -524,14 +533,15 @@ def test_delete(self): timeout_seconds = 7 table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = table_name + '/columnFamilies/' + column_family_id + column_family_name = ( + table_name + '/columnFamilies/' + column_family_id) client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) column_family = self._makeOne(column_family_id, table) # Create request_pb - request_pb = messages_pb2.DeleteColumnFamilyRequest( + request_pb = messages_v1_pb2.DeleteColumnFamilyRequest( name=column_family_name) # Create response_pb @@ -563,9 +573,9 @@ def _callFUT(self, *args, **kwargs): def test_empty(self): from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) - gc_rule_pb = data_pb2.GcRule() + gc_rule_pb = data_v1_pb2.GcRule() self.assertEqual(self._callFUT(gc_rule_pb), None) def test_max_num_versions(self): diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py index 9e6da708e6b6..e2336d7520f3 100644 --- a/gcloud/bigtable/test_row.py +++ b/gcloud/bigtable/test_row.py @@ -75,7 +75,8 @@ def _set_cell_helper(self, column=None, column_bytes=None, timestamp_micros=-1): import six import struct - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) row_key = b'row_key' column_family_id = u'column_family_id' @@ -89,8 +90,8 @@ def _set_cell_helper(self, column=None, column_bytes=None, if isinstance(value, six.integer_types): value = struct.pack('>q', value) - expected_pb = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( + expected_pb = data_v1_pb2.Mutation( + set_cell=data_v1_pb2.Mutation.SetCell( family_name=column_family_id, column_qualifier=column_bytes or column, timestamp_micros=timestamp_micros, @@ -134,15 +135,16 @@ def test_set_cell_with_non_null_timestamp(self): timestamp_micros=millis_granularity) def test_delete(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) row_key = b'row_key' row = self._makeOne(row_key, object()) self.assertEqual(row._pb_mutations, []) row.delete() - expected_pb = data_pb2.Mutation( - delete_from_row=data_pb2.Mutation.DeleteFromRow(), + expected_pb = data_v1_pb2.Mutation( + delete_from_row=data_v1_pb2.Mutation.DeleteFromRow(), ) self.assertEqual(row._pb_mutations, [expected_pb]) @@ -193,7 +195,8 @@ def test_delete_cells_non_iterable(self): row.delete_cells(column_family_id, columns) def test_delete_cells_all_columns(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) row_key = b'row_key' column_family_id = u'column_family_id' @@ -204,8 +207,8 @@ def test_delete_cells_all_columns(self): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, klass.ALL_COLUMNS) - expected_pb = data_pb2.Mutation( - 
delete_from_family=data_pb2.Mutation.DeleteFromFamily( + expected_pb = data_v1_pb2.Mutation( + delete_from_family=data_v1_pb2.Mutation.DeleteFromFamily( family_name=column_family_id, ), ) @@ -223,7 +226,8 @@ def test_delete_cells_no_columns(self): self.assertEqual(row._pb_mutations, []) def _delete_cells_helper(self, time_range=None): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) row_key = b'row_key' column = b'column' @@ -235,8 +239,8 @@ def _delete_cells_helper(self, time_range=None): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns, time_range=time_range) - expected_pb = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + expected_pb = data_v1_pb2.Mutation( + delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( family_name=column_family_id, column_qualifier=column, ), @@ -275,7 +279,8 @@ def test_delete_cells_with_bad_column(self): self.assertEqual(row._pb_mutations, []) def test_delete_cells_with_string_columns(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) row_key = b'row_key' column_family_id = u'column_family_id' @@ -290,14 +295,14 @@ def test_delete_cells_with_string_columns(self): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns) - expected_pb1 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + expected_pb1 = data_v1_pb2.Mutation( + delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( family_name=column_family_id, column_qualifier=column1_bytes, ), ) - expected_pb2 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + expected_pb2 = data_v1_pb2.Mutation( + delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( family_name=column_family_id, column_qualifier=column2_bytes, ), @@ -306,9 +311,10 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub row_key = b'row_key' @@ -322,15 +328,15 @@ def test_commit(self): # Create request_pb value = b'bytes-value' - mutation = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( + mutation = data_v1_pb2.Mutation( + set_cell=data_v1_pb2.Mutation.SetCell( family_name=column_family_id, column_qualifier=column, timestamp_micros=-1, # Default value. 
value=value, ), ) - request_pb = messages_pb2.MutateRowRequest( + request_pb = messages_v1_pb2.MutateRowRequest( table_name=table_name, row_key=row_key, mutations=[mutation], @@ -421,9 +427,10 @@ def test__get_mutations(self): self.assertTrue(false_mutations is row._get_mutations(None)) def test_commit(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.row_filters import RowSampleFilter @@ -442,29 +449,29 @@ def test_commit(self): # Create request_pb value1 = b'bytes-value' - mutation1 = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( + mutation1 = data_v1_pb2.Mutation( + set_cell=data_v1_pb2.Mutation.SetCell( family_name=column_family_id1, column_qualifier=column1, timestamp_micros=-1, # Default value. value=value1, ), ) - mutation2 = data_pb2.Mutation( - delete_from_row=data_pb2.Mutation.DeleteFromRow(), + mutation2 = data_v1_pb2.Mutation( + delete_from_row=data_v1_pb2.Mutation.DeleteFromRow(), ) - mutation3 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + mutation3 = data_v1_pb2.Mutation( + delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( family_name=column_family_id2, column_qualifier=column2, ), ) - mutation4 = data_pb2.Mutation( - delete_from_family=data_pb2.Mutation.DeleteFromFamily( + mutation4 = data_v1_pb2.Mutation( + delete_from_family=data_v1_pb2.Mutation.DeleteFromFamily( family_name=column_family_id3, ), ) - request_pb = messages_pb2.CheckAndMutateRowRequest( + request_pb = messages_v1_pb2.CheckAndMutateRowRequest( table_name=table_name, row_key=row_key, predicate_filter=row_filter.to_pb(), @@ -474,7 +481,7 @@ def test_commit(self): # Create response_pb predicate_matched = True - response_pb = messages_pb2.CheckAndMutateRowResponse( + response_pb = messages_v1_pb2.CheckAndMutateRowResponse( predicate_matched=predicate_matched) # Patch the stub used by the API method. 
@@ -560,7 +567,8 @@ def test_clear(self): self.assertEqual(row._rule_pb_list, []) def test_append_cell_value(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) table = object() row_key = b'row_key' @@ -571,13 +579,14 @@ def test_append_cell_value(self): column_family_id = u'column_family_id' value = b'bytes-val' row.append_cell_value(column_family_id, column, value) - expected_pb = data_pb2.ReadModifyWriteRule( + expected_pb = data_v1_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, append_value=value) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_increment_cell_value(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) table = object() row_key = b'row_key' @@ -588,16 +597,17 @@ def test_increment_cell_value(self): column_family_id = u'column_family_id' int_value = 281330 row.increment_cell_value(column_family_id, column, int_value) - expected_pb = data_pb2.ReadModifyWriteRule( + expected_pb = data_v1_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, increment_amount=int_value) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_commit(self): from gcloud._testing import _Monkey - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import row as MUT @@ -613,11 +623,11 @@ def test_commit(self): # Create request_pb value = b'bytes-value' # We will call row.append_cell_value(COLUMN_FAMILY_ID, COLUMN, value). 
- request_pb = messages_pb2.ReadModifyWriteRowRequest( + request_pb = messages_v1_pb2.ReadModifyWriteRowRequest( table_name=table_name, row_key=row_key, rules=[ - data_pb2.ReadModifyWriteRule( + data_v1_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, append_value=value, @@ -693,7 +703,8 @@ def _callFUT(self, row_response): def test_it(self): from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) col_fam1 = u'col-fam-id' col_fam2 = u'col-fam-id2' @@ -723,28 +734,28 @@ def test_it(self): ], }, } - sample_input = data_pb2.Row( + sample_input = data_v1_pb2.Row( families=[ - data_pb2.Family( + data_v1_pb2.Family( name=col_fam1, columns=[ - data_pb2.Column( + data_v1_pb2.Column( qualifier=col_name1, cells=[ - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val1, timestamp_micros=microseconds, ), - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val2, timestamp_micros=microseconds, ), ], ), - data_pb2.Column( + data_v1_pb2.Column( qualifier=col_name2, cells=[ - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val3, timestamp_micros=microseconds, ), @@ -752,13 +763,13 @@ def test_it(self): ), ], ), - data_pb2.Family( + data_v1_pb2.Family( name=col_fam2, columns=[ - data_pb2.Column( + data_v1_pb2.Column( qualifier=col_name3, cells=[ - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val4, timestamp_micros=microseconds, ), @@ -779,7 +790,8 @@ def _callFUT(self, family_pb): def test_it(self): from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) col_fam1 = u'col-fam-id' col_name1 = b'col-name1' @@ -800,26 +812,26 @@ def test_it(self): ], } expected_output = (col_fam1, expected_dict) - sample_input = data_pb2.Family( + sample_input = data_v1_pb2.Family( name=col_fam1, columns=[ - data_pb2.Column( + data_v1_pb2.Column( qualifier=col_name1, cells=[ - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val1, timestamp_micros=microseconds, ), - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val2, timestamp_micros=microseconds, ), ], ), - data_pb2.Column( + data_v1_pb2.Column( qualifier=col_name2, cells=[ - data_pb2.Cell( + data_v1_pb2.Cell( value=cell_val3, timestamp_micros=microseconds, ), diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 56b1c15f0655..ce901fb30dce 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -28,20 +28,20 @@ def _makeOne(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) timestamp_micros = 18738724000 # Make sure millis granularity timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) value = b'value-bytes' if labels is None: - cell_pb = data_pb2.Cell(value=value, - timestamp_micros=timestamp_micros) + cell_pb = data_v1_pb2.Cell( + value=value, timestamp_micros=timestamp_micros) cell_expected = self._makeOne(value, timestamp) else: - cell_pb = data_pb2.Cell(value=value, - timestamp_micros=timestamp_micros, - labels=labels) + cell_pb = data_v1_pb2.Cell( + value=value, timestamp_micros=timestamp_micros, labels=labels) cell_expected = self._makeOne(value, timestamp, 
labels=labels) klass = self._getTargetClass() @@ -209,10 +209,10 @@ def test_clear(self): def test__handle_commit_row(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) index = last_chunk_index = 1 self.assertFalse(partial_row_data.committed) @@ -221,33 +221,34 @@ def test__handle_commit_row(self): def test__handle_commit_row_false(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=False) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=False) with self.assertRaises(ValueError): partial_row_data._handle_commit_row(chunk, None, None) def test__handle_commit_row_not_last_chunk(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) with self.assertRaises(ValueError): index = 0 last_chunk_index = 1 self.assertNotEqual(index, last_chunk_index) - partial_row_data._handle_commit_row(chunk, index, last_chunk_index) + partial_row_data._handle_commit_row( + chunk, index, last_chunk_index) def test__handle_reset_row(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=True) # Modify the PartialRowData object so we can check it's been cleared. 
partial_row_data._cells = {1: 2} @@ -258,33 +259,35 @@ def test__handle_reset_row(self): def test__handle_reset_row_failure(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=False) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=False) with self.assertRaises(ValueError): partial_row_data._handle_reset_row(chunk) def test__handle_row_contents(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.row_data import Cell partial_row_data = self._makeOne(None) - cell1_pb = data_pb2.Cell(timestamp_micros=1, value=b'val1') - cell2_pb = data_pb2.Cell(timestamp_micros=200, value=b'val2') - cell3_pb = data_pb2.Cell(timestamp_micros=300000, value=b'val3') + cell1_pb = data_v1_pb2.Cell(timestamp_micros=1, value=b'val1') + cell2_pb = data_v1_pb2.Cell(timestamp_micros=200, value=b'val2') + cell3_pb = data_v1_pb2.Cell(timestamp_micros=300000, value=b'val3') col1 = b'col1' col2 = b'col2' columns = [ - data_pb2.Column(qualifier=col1, cells=[cell1_pb, cell2_pb]), - data_pb2.Column(qualifier=col2, cells=[cell3_pb]), + data_v1_pb2.Column(qualifier=col1, cells=[cell1_pb, cell2_pb]), + data_v1_pb2.Column(qualifier=col2, cells=[cell3_pb]), ] family_name = u'name' - row_contents = data_pb2.Family(name=family_name, columns=columns) - chunk = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) + row_contents = data_v1_pb2.Family(name=family_name, columns=columns) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk( + row_contents=row_contents) self.assertEqual(partial_row_data.cells, {}) partial_row_data._handle_row_contents(chunk) @@ -297,31 +300,34 @@ def test__handle_row_contents(self): self.assertEqual(partial_row_data.cells, expected_cells) def test_update_from_read_rows(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) row_key = b'row-key' partial_row_data = self._makeOne(row_key) # Set-up chunk1, some data that will be reset by chunk2. ignored_family_name = u'ignore-name' - row_contents = data_pb2.Family(name=ignored_family_name) - chunk1 = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) + row_contents = data_v1_pb2.Family(name=ignored_family_name) + chunk1 = messages_v1_pb2.ReadRowsResponse.Chunk( + row_contents=row_contents) # Set-up chunk2, a reset row. - chunk2 = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) + chunk2 = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=True) # Set-up chunk3, a column family with no columns. family_name = u'name' - row_contents = data_pb2.Family(name=family_name) - chunk3 = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) + row_contents = data_v1_pb2.Family(name=family_name) + chunk3 = messages_v1_pb2.ReadRowsResponse.Chunk( + row_contents=row_contents) # Set-up chunk4, a commit row. 
- chunk4 = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) + chunk4 = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) # Prepare request and make sure PartialRowData is empty before. - read_rows_response_pb = messages_pb2.ReadRowsResponse( + read_rows_response_pb = messages_v1_pb2.ReadRowsResponse( row_key=row_key, chunks=[chunk1, chunk2, chunk3, chunk4]) self.assertEqual(partial_row_data.cells, {}) self.assertFalse(partial_row_data.committed) @@ -346,7 +352,7 @@ def test_update_from_read_rows_while_committed(self): def test_update_from_read_rows_row_key_disagree(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) row_key1 = b'row-key1' row_key2 = b'row-key2' @@ -354,7 +360,8 @@ def test_update_from_read_rows_row_key_disagree(self): self.assertFalse(partial_row_data._chunks_encountered) self.assertNotEqual(row_key1, row_key2) - read_rows_response_pb = messages_pb2.ReadRowsResponse(row_key=row_key2) + read_rows_response_pb = messages_v1_pb2.ReadRowsResponse( + row_key=row_key2) with self.assertRaises(ValueError): partial_row_data.update_from_read_rows(read_rows_response_pb) @@ -362,14 +369,14 @@ def test_update_from_read_rows_row_key_disagree(self): def test_update_from_read_rows_empty_chunk(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) row_key = b'row-key' partial_row_data = self._makeOne(row_key) self.assertFalse(partial_row_data._chunks_encountered) - chunk = messages_pb2.ReadRowsResponse.Chunk() - read_rows_response_pb = messages_pb2.ReadRowsResponse( + chunk = messages_v1_pb2.ReadRowsResponse.Chunk() + read_rows_response_pb = messages_v1_pb2.ReadRowsResponse( row_key=row_key, chunks=[chunk]) # This makes it an "empty" chunk. 
@@ -451,11 +458,11 @@ def test_cancel(self): def test_consume_next(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.row_data import PartialRowData row_key = b'row-key' - value_pb = messages_pb2.ReadRowsResponse(row_key=row_key) + value_pb = messages_v1_pb2.ReadRowsResponse(row_key=row_key) response_iterator = _MockCancellableIterator(value_pb) partial_rows_data = self._makeOne(response_iterator) self.assertEqual(partial_rows_data.rows, {}) @@ -465,13 +472,13 @@ def test_consume_next(self): def test_consume_next_row_exists(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.row_data import PartialRowData row_key = b'row-key' - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - value_pb = messages_pb2.ReadRowsResponse(row_key=row_key, - chunks=[chunk]) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) + value_pb = messages_v1_pb2.ReadRowsResponse( + row_key=row_key, chunks=[chunk]) response_iterator = _MockCancellableIterator(value_pb) partial_rows_data = self._makeOne(response_iterator) existing_values = PartialRowData(row_key) @@ -495,7 +502,8 @@ def test_consume_all(self): partial_rows_data = klass(response_iterator) self.assertEqual(partial_rows_data._consumed, []) partial_rows_data.consume_all() - self.assertEqual(partial_rows_data._consumed, [value1, value2, value3]) + self.assertEqual( + partial_rows_data._consumed, [value1, value2, value3]) def test_consume_all_with_max_loops(self): klass = self._getDoNothingClass() @@ -507,7 +515,8 @@ def test_consume_all_with_max_loops(self): partial_rows_data.consume_all(max_loops=1) self.assertEqual(partial_rows_data._consumed, [value1]) # Make sure the iterator still has the remaining values. 
- self.assertEqual(list(response_iterator.iter_values), [value2, value3]) + self.assertEqual( + list(response_iterator.iter_values), [value2, value3]) class _MockCancellableIterator(object): diff --git a/gcloud/bigtable/test_row_filters.py b/gcloud/bigtable/test_row_filters.py index aed90574683f..768ffb79bd32 100644 --- a/gcloud/bigtable/test_row_filters.py +++ b/gcloud/bigtable/test_row_filters.py @@ -60,12 +60,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(sink=flag) + expected_pb = data_v1_pb2.RowFilter(sink=flag) self.assertEqual(pb_val, expected_pb) @@ -79,12 +80,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(pass_all_filter=flag) + expected_pb = data_v1_pb2.RowFilter(pass_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -98,12 +100,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(block_all_filter=flag) + expected_pb = data_v1_pb2.RowFilter(block_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -156,12 +159,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) regex = b'row-key-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(row_key_regex_filter=regex) + expected_pb = data_v1_pb2.RowFilter(row_key_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -192,12 +196,13 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) sample = 0.25 row_filter = self._makeOne(sample) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(row_sample_filter=sample) + expected_pb = data_v1_pb2.RowFilter(row_sample_filter=sample) self.assertEqual(pb_val, expected_pb) @@ -211,12 +216,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) regex = u'family-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(family_name_regex_filter=regex) + expected_pb = data_v1_pb2.RowFilter(family_name_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -230,12 +236,14 @@ def _makeOne(self, *args, **kwargs): return 
self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) regex = b'column-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(column_qualifier_regex_filter=regex) + expected_pb = data_v1_pb2.RowFilter( + column_qualifier_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -280,7 +288,8 @@ def test___ne__same_value(self): def _to_pb_helper(self, start_micros=None, end_micros=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) pb_kwargs = {} @@ -294,7 +303,7 @@ def _to_pb_helper(self, start_micros=None, end_micros=None): pb_kwargs['end_timestamp_micros'] = end_micros time_range = self._makeOne(start=start, end=end) - expected_pb = data_pb2.TimestampRange(**pb_kwargs) + expected_pb = data_v1_pb2.TimestampRange(**pb_kwargs) self.assertEqual(time_range.to_pb(), expected_pb) def test_to_pb(self): @@ -342,14 +351,15 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import TimestampRange range_ = TimestampRange() row_filter = self._makeOne(range_) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter( - timestamp_range_filter=data_pb2.TimestampRange()) + expected_pb = data_v1_pb2.RowFilter( + timestamp_range_filter=data_v1_pb2.TimestampRange()) self.assertEqual(pb_val, expected_pb) @@ -377,10 +387,12 @@ def test_constructor_explicit(self): end_column = object() inclusive_start = object() inclusive_end = object() - row_filter = self._makeOne(column_family_id, start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + row_filter = self._makeOne( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) self.assertTrue(row_filter.column_family_id is column_family_id) self.assertTrue(row_filter.start_column is start_column) self.assertTrue(row_filter.end_column is end_column) @@ -422,66 +434,71 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) column_family_id = u'column-family-id' row_filter = self._makeOne(column_family_id) - col_range_pb = data_pb2.ColumnRange(family_name=column_family_id) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + col_range_pb = data_v1_pb2.ColumnRange(family_name=column_family_id) + expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, start_column=column) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = data_v1_pb2.ColumnRange( 
family_name=column_family_id, start_qualifier_inclusive=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, start_column=column, inclusive_start=False) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = data_v1_pb2.ColumnRange( family_name=column_family_id, start_qualifier_exclusive=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, end_column=column) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = data_v1_pb2.ColumnRange( family_name=column_family_id, end_qualifier_inclusive=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, end_column=column, inclusive_end=False) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = data_v1_pb2.ColumnRange( family_name=column_family_id, end_qualifier_exclusive=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -495,12 +512,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) regex = b'value-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(value_regex_filter=regex) + expected_pb = data_v1_pb2.RowFilter(value_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -561,47 +579,52 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) row_filter = self._makeOne() - expected_pb = data_pb2.RowFilter( - value_range_filter=data_pb2.ValueRange()) + expected_pb = data_v1_pb2.RowFilter( + value_range_filter=data_v1_pb2.ValueRange()) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) value = b'some-value' row_filter = self._makeOne(start_value=value) - val_range_pb 
= data_pb2.ValueRange(start_value_inclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = data_v1_pb2.ValueRange(start_value_inclusive=value) + expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) value = b'some-value' row_filter = self._makeOne(start_value=value, inclusive_start=False) - val_range_pb = data_pb2.ValueRange(start_value_exclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = data_v1_pb2.ValueRange(start_value_exclusive=value) + expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) value = b'some-value' row_filter = self._makeOne(end_value=value) - val_range_pb = data_pb2.ValueRange(end_value_inclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = data_v1_pb2.ValueRange(end_value_inclusive=value) + expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) value = b'some-value' row_filter = self._makeOne(end_value=value, inclusive_end=False) - val_range_pb = data_pb2.ValueRange(end_value_exclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = data_v1_pb2.ValueRange(end_value_exclusive=value) + expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -649,12 +672,14 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) num_cells = 76 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(cells_per_row_offset_filter=num_cells) + expected_pb = data_v1_pb2.RowFilter( + cells_per_row_offset_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -668,12 +693,14 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) num_cells = 189 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(cells_per_row_limit_filter=num_cells) + expected_pb = data_v1_pb2.RowFilter( + cells_per_row_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -687,12 +714,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) num_cells = 10 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = 
data_pb2.RowFilter( + expected_pb = data_v1_pb2.RowFilter( cells_per_column_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -707,12 +735,13 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(strip_value_transformer=flag) + expected_pb = data_v1_pb2.RowFilter(strip_value_transformer=flag) self.assertEqual(pb_val, expected_pb) @@ -743,12 +772,13 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) label = u'label' row_filter = self._makeOne(label) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(apply_label_transformer=label) + expected_pb = data_v1_pb2.RowFilter(apply_label_transformer=label) self.assertEqual(pb_val, expected_pb) @@ -793,7 +823,8 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -806,15 +837,16 @@ def test_to_pb(self): row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - chain=data_pb2.RowFilter.Chain( + expected_pb = data_v1_pb2.RowFilter( + chain=data_v1_pb2.RowFilter.Chain( filters=[row_filter1_pb, row_filter2_pb], ), ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import CellsRowLimitFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -831,8 +863,8 @@ def test_to_pb_nested(self): row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() - expected_pb = data_pb2.RowFilter( - chain=data_pb2.RowFilter.Chain( + expected_pb = data_v1_pb2.RowFilter( + chain=data_v1_pb2.RowFilter.Chain( filters=[row_filter3_pb, row_filter4_pb], ), ) @@ -849,7 +881,8 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -862,15 +895,16 @@ def test_to_pb(self): row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - interleave=data_pb2.RowFilter.Interleave( + expected_pb = data_v1_pb2.RowFilter( + interleave=data_v1_pb2.RowFilter.Interleave( filters=[row_filter1_pb, row_filter2_pb], ), ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + 
from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import CellsRowLimitFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -887,8 +921,8 @@ def test_to_pb_nested(self): row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() - expected_pb = data_pb2.RowFilter( - interleave=data_pb2.RowFilter.Interleave( + expected_pb = data_v1_pb2.RowFilter( + interleave=data_v1_pb2.RowFilter.Interleave( filters=[row_filter3_pb, row_filter4_pb], ), ) @@ -938,7 +972,8 @@ def test___eq__type_differ(self): self.assertNotEqual(cond_filter1, cond_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import CellsRowOffsetFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -956,8 +991,8 @@ def test_to_pb(self): false_filter=row_filter3) filter_pb = row_filter4.to_pb() - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( + expected_pb = data_v1_pb2.RowFilter( + condition=data_v1_pb2.RowFilter.Condition( predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, false_filter=row_filter3_pb, @@ -966,7 +1001,8 @@ def test_to_pb(self): self.assertEqual(filter_pb, expected_pb) def test_to_pb_true_only(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -979,8 +1015,8 @@ def test_to_pb_true_only(self): row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( + expected_pb = data_v1_pb2.RowFilter( + condition=data_v1_pb2.RowFilter.Condition( predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, ), @@ -988,7 +1024,8 @@ def test_to_pb_true_only(self): self.assertEqual(filter_pb, expected_pb) def test_to_pb_false_only(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -1001,8 +1038,8 @@ def test_to_pb_false_only(self): row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( + expected_pb = data_v1_pb2.RowFilter( + condition=data_v1_pb2.RowFilter.Condition( predicate_filter=row_filter1_pb, false_filter=row_filter2_pb, ), diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 09d5baba225d..0f015777aadf 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -126,9 +126,9 @@ def test___ne__(self): def _create_test_helper(self, initial_split_keys): from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_table_service_messages_pb2 as messages_v1_pb2) from 
gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -144,14 +144,14 @@ def _create_test_helper(self, initial_split_keys): table = self._makeOne(table_id, cluster) # Create request_pb - request_pb = messages_pb2.CreateTableRequest( + request_pb = messages_v1_pb2.CreateTableRequest( initial_split_keys=initial_split_keys, name=cluster_name, table_id=table_id, ) # Create response_pb - response_pb = data_pb2.Table() + response_pb = data_v1_pb2.Table() # Patch the stub used by the API method. client._table_stub = stub = _FakeStub(response_pb) @@ -178,9 +178,9 @@ def test_create_with_split_keys(self): def _list_column_families_helper(self, column_family_name=None): from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) + bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_table_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -197,15 +197,15 @@ def _list_column_families_helper(self, column_family_name=None): # Create request_pb table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.GetTableRequest(name=table_name) + request_pb = messages_v1_pb2.GetTableRequest(name=table_name) # Create response_pb column_family_id = 'foo' if column_family_name is None: column_family_name = (table_name + '/columnFamilies/' + column_family_id) - column_family = data_pb2.ColumnFamily(name=column_family_name) - response_pb = data_pb2.Table( + column_family = data_v1_pb2.ColumnFamily(name=column_family_name) + response_pb = data_v1_pb2.Table( column_families={column_family_id: column_family}, ) @@ -238,7 +238,7 @@ def test_list_column_families_failure(self): def test_delete(self): from google.protobuf import empty_pb2 from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + bigtable_table_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -255,7 +255,7 @@ def test_delete(self): # Create request_pb table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.DeleteTableRequest(name=table_name) + request_pb = messages_v1_pb2.DeleteTableRequest(name=table_name) # Create response_pb response_pb = empty_pb2.Empty() @@ -278,7 +278,7 @@ def test_delete(self): def _read_row_helper(self, chunks): from gcloud._testing import _Monkey from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.row_data import PartialRowData from gcloud.bigtable import table as MUT @@ -304,8 +304,8 @@ def mock_create_row_request(table_name, row_key, filter_): # Create response_iterator row_key = b'row-key' - response_pb = messages_pb2.ReadRowsResponse(row_key=row_key, - chunks=chunks) + response_pb = messages_v1_pb2.ReadRowsResponse( + row_key=row_key, chunks=chunks) response_iterator = [response_pb] # Patch the stub used by the API method. 
@@ -334,9 +334,9 @@ def mock_create_row_request(table_name, row_key, filter_): def test_read_row(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) chunks = [chunk] self._read_row_helper(chunks) @@ -346,10 +346,10 @@ def test_read_empty_row(self): def test_read_row_still_partial(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) # There is never a "commit row". - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) + chunk = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=True) chunks = [chunk] with self.assertRaises(ValueError): self._read_row_helper(chunks) @@ -416,7 +416,7 @@ def mock_create_row_request(table_name, **kwargs): def test_sample_row_keys(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -433,7 +433,8 @@ def test_sample_row_keys(self): # Create request_pb table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.SampleRowKeysRequest(table_name=table_name) + request_pb = messages_v1_pb2.SampleRowKeysRequest( + table_name=table_name) # Create response_iterator response_iterator = object() # Just passed to a mock. @@ -466,11 +467,12 @@ def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, def test_table_name_only(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' result = self._callFUT(table_name) - expected_result = messages_pb2.ReadRowsRequest(table_name=table_name) + expected_result = messages_v1_pb2.ReadRowsRequest( + table_name=table_name) self.assertEqual(result, expected_result) def test_row_key_row_range_conflict(self): @@ -479,70 +481,74 @@ def test_row_key_row_range_conflict(self): def test_row_key(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' row_key = b'row_key' result = self._callFUT(table_name, row_key=row_key) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, row_key=row_key, ) self.assertEqual(result, expected_result) def test_row_range_start_key(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' start_key = b'start_key' result = self._callFUT(table_name, start_key=start_key) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, - row_range=data_pb2.RowRange(start_key=start_key), + row_range=data_v1_pb2.RowRange(start_key=start_key), ) self.assertEqual(result, expected_result) def test_row_range_end_key(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as 
messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' end_key = b'end_key' result = self._callFUT(table_name, end_key=end_key) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, - row_range=data_pb2.RowRange(end_key=end_key), + row_range=data_v1_pb2.RowRange(end_key=end_key), ) self.assertEqual(result, expected_result) def test_row_range_both_keys(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated import ( + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' start_key = b'start_key' end_key = b'end_key' result = self._callFUT(table_name, start_key=start_key, end_key=end_key) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, - row_range=data_pb2.RowRange(start_key=start_key, end_key=end_key), + row_range=data_v1_pb2.RowRange( + start_key=start_key, end_key=end_key), ) self.assertEqual(result, expected_result) def test_with_filter(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter table_name = 'table_name' row_filter = RowSampleFilter(0.33) result = self._callFUT(table_name, filter_=row_filter) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, filter=row_filter.to_pb(), ) @@ -550,13 +556,13 @@ def test_with_filter(self): def test_with_allow_row_interleaving(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' allow_row_interleaving = True result = self._callFUT(table_name, allow_row_interleaving=allow_row_interleaving) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, allow_row_interleaving=allow_row_interleaving, ) @@ -564,12 +570,12 @@ def test_with_allow_row_interleaving(self): def test_with_limit(self): from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + bigtable_service_messages_pb2 as messages_v1_pb2) table_name = 'table_name' limit = 1337 result = self._callFUT(table_name, limit=limit) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = messages_v1_pb2.ReadRowsRequest( table_name=table_name, num_rows_limit=limit, ) From 3c533e038b412c9d43b5236b3e5a00c84639deb4 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sun, 26 Jun 2016 19:01:11 -0400 Subject: [PATCH 022/103] Factor out generating GCRule-related protobufs. 
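This change replaces the repeated multi-line imports of the generated ``bigtable_table_data_pb2`` module with small module-level factory helpers. As a sketch of the pattern (the concrete helpers are added at the bottom of ``test_column_family.py`` in the diff below), each helper imports the generated module at call time and simply forwards its arguments:

    def _GcRulePB(*args, **kw):
        # Import inside the helper, mirroring the per-test imports it replaces.
        from gcloud.bigtable._generated import (
            bigtable_table_data_pb2 as data_v1_pb2)
        return data_v1_pb2.GcRule(*args, **kw)

Individual tests then build expected protobufs via ``_GcRulePB(...)`` instead of repeating the import in every test method.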
--- gcloud/bigtable/test_column_family.py | 104 ++++++++++++++------------ 1 file changed, 55 insertions(+), 49 deletions(-) diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py index 00384a3df0df..19971b788f4c 100644 --- a/gcloud/bigtable/test_column_family.py +++ b/gcloud/bigtable/test_column_family.py @@ -107,12 +107,10 @@ def test___ne__same_value(self): self.assertFalse(comparison_val) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) max_num_versions = 1337 gc_rule = self._makeOne(max_num_versions=max_num_versions) pb_val = gc_rule.to_pb() - expected = data_v1_pb2.GcRule(max_num_versions=max_num_versions) + expected = _GcRulePB(max_num_versions=max_num_versions) self.assertEqual(pb_val, expected) @@ -147,14 +145,12 @@ def test___ne__same_value(self): def test_to_pb(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) max_age = datetime.timedelta(seconds=1) duration = duration_pb2.Duration(seconds=1) gc_rule = self._makeOne(max_age=max_age) pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, data_v1_pb2.GcRule(max_age=duration)) + self.assertEqual(pb_val, _GcRulePB(max_age=duration)) class TestGCRuleUnion(unittest2.TestCase): @@ -193,23 +189,21 @@ def test___ne__same_value(self): def test_to_pb(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_v1_pb2.GcRule( + pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_v1_pb2.GcRule( - union=data_v1_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) + pb_rule3 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() self.assertEqual(gc_rule_pb, pb_rule3) @@ -217,31 +211,29 @@ def test_to_pb(self): def test_to_pb_nested(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_v1_pb2.GcRule( + pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_v1_pb2.GcRule( - union=data_v1_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) + pb_rule3 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_v1_pb2.GcRule(max_num_versions=max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_v1_pb2.GcRule( - union=data_v1_pb2.GcRule.Union(rules=[pb_rule3, 
pb_rule4])) + pb_rule5 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() self.assertEqual(gc_rule_pb, pb_rule5) @@ -283,23 +275,21 @@ def test___ne__same_value(self): def test_to_pb(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_v1_pb2.GcRule( + pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_v1_pb2.GcRule( - intersection=data_v1_pb2.GcRule.Intersection( + pb_rule3 = _GcRulePB( + intersection=_GcRuleIntersectionPB( rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() @@ -308,32 +298,30 @@ def test_to_pb(self): def test_to_pb_nested(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_v1_pb2.GcRule(max_num_versions=max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_v1_pb2.GcRule( + pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_v1_pb2.GcRule( - intersection=data_v1_pb2.GcRule.Intersection( + pb_rule3 = _GcRulePB( + intersection=_GcRuleIntersectionPB( rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_v1_pb2.GcRule(max_num_versions=max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_v1_pb2.GcRule( - intersection=data_v1_pb2.GcRule.Intersection( + pb_rule5 = _GcRulePB( + intersection=_GcRuleIntersectionPB( rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() @@ -401,8 +389,6 @@ def test___ne__(self): self.assertNotEqual(column_family1, column_family2) def _create_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( bigtable_table_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub @@ -423,9 +409,9 @@ def _create_test_helper(self, gc_rule=None): # Create request_pb if gc_rule is None: - column_family_pb = data_v1_pb2.ColumnFamily() + column_family_pb = _ColumnFamilyPB() else: - column_family_pb = data_v1_pb2.ColumnFamily( + column_family_pb = _ColumnFamilyPB( gc_rule=gc_rule.to_pb()) request_pb = messages_v1_pb2.CreateColumnFamilyRequest( name=table_name, @@ -434,7 +420,7 @@ def _create_test_helper(self, gc_rule=None): ) # Create response_pb - response_pb = data_v1_pb2.ColumnFamily() + response_pb = _ColumnFamilyPB() # Patch the stub used by the API method. 
client._table_stub = stub = _FakeStub(response_pb) @@ -462,8 +448,6 @@ def test_create_with_gc_rule(self): self._create_test_helper(gc_rule=gc_rule) def _update_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -484,15 +468,15 @@ def _update_test_helper(self, gc_rule=None): # Create request_pb if gc_rule is None: - request_pb = data_v1_pb2.ColumnFamily(name=column_family_name) + request_pb = _ColumnFamilyPB(name=column_family_name) else: - request_pb = data_v1_pb2.ColumnFamily( + request_pb = _ColumnFamilyPB( name=column_family_name, gc_rule=gc_rule.to_pb(), ) # Create response_pb - response_pb = data_v1_pb2.ColumnFamily() + response_pb = _ColumnFamilyPB() # Patch the stub used by the API method. client._table_stub = stub = _FakeStub(response_pb) @@ -572,10 +556,8 @@ def _callFUT(self, *args, **kwargs): return _gc_rule_from_pb(*args, **kwargs) def test_empty(self): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) - gc_rule_pb = data_v1_pb2.GcRule() + gc_rule_pb = _GcRulePB() self.assertEqual(self._callFUT(gc_rule_pb), None) def test_max_num_versions(self): @@ -640,6 +622,30 @@ def WhichOneof(cls, name): self.assertEqual(MockProto.names, ['rule']) +def _GcRulePB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_table_data_pb2 as data_v1_pb2) + return data_v1_pb2.GcRule(*args, **kw) + + +def _GcRuleIntersectionPB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_table_data_pb2 as data_v1_pb2) + return data_v1_pb2.GcRule.Intersection(*args, **kw) + + +def _GcRuleUnionPB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_table_data_pb2 as data_v1_pb2) + return data_v1_pb2.GcRule.Union(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_table_data_pb2 as data_v1_pb2) + return data_v1_pb2.ColumnFamily(*args, **kw) + + class _Cluster(object): def __init__(self, client=None): From 7e9c61d0ee459f8fad73fc960606af80b1a699c0 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sun, 26 Jun 2016 19:43:02 -0400 Subject: [PATCH 023/103] Update bigtable.column_family to use V2 protos. Note that {Create,Update,Delete}ColumnFamily messages all collapse to ModifyColumnFamilies. --- gcloud/bigtable/column_family.py | 79 +++++++++++++++------------ gcloud/bigtable/test_column_family.py | 76 ++++++++++++++------------ 2 files changed, 85 insertions(+), 70 deletions(-) diff --git a/gcloud/bigtable/column_family.py b/gcloud/bigtable/column_family.py index 5d17be804f9c..9088e24a72ab 100644 --- a/gcloud/bigtable/column_family.py +++ b/gcloud/bigtable/column_family.py @@ -20,10 +20,10 @@ from google.protobuf import duration_pb2 from gcloud._helpers import _total_seconds -from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) def _timedelta_to_duration_pb(timedelta_val): @@ -111,10 +111,10 @@ def __eq__(self, other): def to_pb(self): """Converts the garbage collection rule to a protobuf. - :rtype: :class:`.data_v1_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. 
""" - return data_v1_pb2.GcRule(max_num_versions=self.max_num_versions) + return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions) class MaxAgeGCRule(GarbageCollectionRule): @@ -135,11 +135,11 @@ def __eq__(self, other): def to_pb(self): """Converts the garbage collection rule to a protobuf. - :rtype: :class:`.data_v1_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ max_age = _timedelta_to_duration_pb(self.max_age) - return data_v1_pb2.GcRule(max_age=max_age) + return table_v2_pb2.GcRule(max_age=max_age) class GCRuleUnion(GarbageCollectionRule): @@ -160,12 +160,12 @@ def __eq__(self, other): def to_pb(self): """Converts the union into a single GC rule as a protobuf. - :rtype: :class:`.data_v1_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ - union = data_v1_pb2.GcRule.Union( + union = table_v2_pb2.GcRule.Union( rules=[rule.to_pb() for rule in self.rules]) - return data_v1_pb2.GcRule(union=union) + return table_v2_pb2.GcRule(union=union) class GCRuleIntersection(GarbageCollectionRule): @@ -186,12 +186,12 @@ def __eq__(self, other): def to_pb(self): """Converts the intersection into a single GC rule as a protobuf. - :rtype: :class:`.data_v1_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ - intersection = data_v1_pb2.GcRule.Intersection( + intersection = table_v2_pb2.GcRule.Intersection( rules=[rule.to_pb() for rule in self.rules]) - return data_v1_pb2.GcRule(intersection=intersection) + return table_v2_pb2.GcRule(intersection=intersection) class ColumnFamily(object): @@ -251,21 +251,22 @@ def __ne__(self, other): def create(self): """Create this column family.""" if self.gc_rule is None: - column_family = data_v1_pb2.ColumnFamily() + column_family = table_v2_pb2.ColumnFamily() else: - column_family = data_v1_pb2.ColumnFamily( + column_family = table_v2_pb2.ColumnFamily( gc_rule=self.gc_rule.to_pb()) - request_pb = messages_v1_pb2.CreateColumnFamilyRequest( - name=self._table.name, - column_family_id=self.column_family_id, - column_family=column_family, + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + create=column_family, ) client = self._table._cluster._client - # We expect a `.data_v1_pb2.ColumnFamily`. We ignore it since the only + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_stub.CreateColumnFamily(request_pb, - client.timeout_seconds) + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds) def update(self): """Update this column family. @@ -275,30 +276,40 @@ def update(self): Only the GC rule can be updated. By changing the column family ID, you will simply be referring to a different column family. 
""" - request_kwargs = {'name': self.name} - if self.gc_rule is not None: - request_kwargs['gc_rule'] = self.gc_rule.to_pb() - request_pb = data_v1_pb2.ColumnFamily(**request_kwargs) + if self.gc_rule is None: + column_family = table_v2_pb2.ColumnFamily() + else: + column_family = table_v2_pb2.ColumnFamily( + gc_rule=self.gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + update=column_family) client = self._table._cluster._client - # We expect a `.data_v1_pb2.ColumnFamily`. We ignore it since the only + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_stub.UpdateColumnFamily(request_pb, - client.timeout_seconds) + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds) def delete(self): """Delete this column family.""" - request_pb = messages_v1_pb2.DeleteColumnFamilyRequest(name=self.name) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + drop=True) client = self._table._cluster._client # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.DeleteColumnFamily(request_pb, - client.timeout_seconds) + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds) def _gc_rule_from_pb(gc_rule_pb): """Convert a protobuf GC rule to a native object. - :type gc_rule_pb: :class:`.data_v1_pb2.GcRule` + :type gc_rule_pb: :class:`.table_v2_pb2.GcRule` :param gc_rule_pb: The GC rule to convert. :rtype: :class:`GarbageCollectionRule` or :data:`NoneType ` diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py index 19971b788f4c..77d3f7bfdfd7 100644 --- a/gcloud/bigtable/test_column_family.py +++ b/gcloud/bigtable/test_column_family.py @@ -389,8 +389,8 @@ def test___ne__(self): self.assertNotEqual(column_family1, column_family2) def _create_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -411,12 +411,12 @@ def _create_test_helper(self, gc_rule=None): if gc_rule is None: column_family_pb = _ColumnFamilyPB() else: - column_family_pb = _ColumnFamilyPB( - gc_rule=gc_rule.to_pb()) - request_pb = messages_v1_pb2.CreateColumnFamilyRequest( - name=table_name, - column_family_id=column_family_id, - column_family=column_family_pb, + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + create=column_family_pb, ) # Create response_pb @@ -434,7 +434,7 @@ def _create_test_helper(self, gc_rule=None): self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'CreateColumnFamily', + 'ModifyColumnFamilies', (request_pb, timeout_seconds), {}, )]) @@ -449,6 +449,8 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) project_id = 'project-id' zone = 'zone' @@ -458,8 
+460,6 @@ def _update_test_helper(self, gc_rule=None): timeout_seconds = 28 table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = ( - table_name + '/columnFamilies/' + column_family_id) client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) @@ -468,12 +468,15 @@ def _update_test_helper(self, gc_rule=None): # Create request_pb if gc_rule is None: - request_pb = _ColumnFamilyPB(name=column_family_name) + column_family_pb = _ColumnFamilyPB() else: - request_pb = _ColumnFamilyPB( - name=column_family_name, - gc_rule=gc_rule.to_pb(), - ) + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + update=column_family_pb, + ) # Create response_pb response_pb = _ColumnFamilyPB() @@ -490,7 +493,7 @@ def _update_test_helper(self, gc_rule=None): self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'UpdateColumnFamily', + 'ModifyColumnFamilies', (request_pb, timeout_seconds), {}, )]) @@ -505,8 +508,8 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -517,16 +520,17 @@ def test_delete(self): timeout_seconds = 7 table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = ( - table_name + '/columnFamilies/' + column_family_id) client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) column_family = self._makeOne(column_family_id, table) # Create request_pb - request_pb = messages_v1_pb2.DeleteColumnFamilyRequest( - name=column_family_name) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + drop=True) # Create response_pb response_pb = empty_pb2.Empty() @@ -543,7 +547,7 @@ def test_delete(self): self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'DeleteColumnFamily', + 'ModifyColumnFamilies', (request_pb, timeout_seconds), {}, )]) @@ -623,27 +627,27 @@ def WhichOneof(cls, name): def _GcRulePB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) - return data_v1_pb2.GcRule(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) - return data_v1_pb2.GcRule.Intersection(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) - return data_v1_pb2.GcRule.Union(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from gcloud.bigtable._generated import ( - 
bigtable_table_data_pb2 as data_v1_pb2) - return data_v1_pb2.ColumnFamily(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) class _Cluster(object): From 5d179bc79b869f694510719862567fb44b6d19c6 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 11:26:22 -0400 Subject: [PATCH 024/103] Clarify: testcases leaving incomplete final rows do not raise. Assert that the state is 'ROW_IN_PROGRESS', and check that the completed rows match the expected results. --- gcloud/bigtable/row_data.py | 1 - gcloud/bigtable/test_row_data.py | 49 ++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index 5191a1759b16..b6cf55d98f0a 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -408,7 +408,6 @@ def rows(self): :rtype: dict :returns: Dictionary of :class:`PartialRowData`. """ - _raise_if(self.state not in (self.NEW_ROW,)) # NOTE: To avoid duplicating large objects, this is just the # mutable private data. return self._rows diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index eb85a8bd9fe7..bfc660c8bded 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -687,7 +687,7 @@ def test_invalid_empty_second_chunk(self): with self.assertRaises(ReadRowsResponseError): prd.consume_next() - # JSON Error cases + # JSON Error cases: invalid chunks def _fail_during_consume(self, testcase_name): from gcloud.bigtable.row_data import ReadRowsResponseError @@ -698,19 +698,6 @@ def _fail_during_consume(self, testcase_name): with self.assertRaises(ReadRowsResponseError): prd.consume_next() - def _fail_during_rows(self, testcase_name): - from gcloud.bigtable.row_data import ReadRowsResponseError - chunks, _ = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) - prd.consume_next() - with self.assertRaises(ReadRowsResponseError): - _ = prd.rows - - def test_invalid_no_commit(self): - self._fail_during_rows('invalid - no commit') - def test_invalid_no_cell_key_before_commit(self): self._fail_during_consume('invalid - no cell key before commit') @@ -727,9 +714,6 @@ def test_invalid_no_commit_between_rows(self): def test_invalid_no_commit_after_first_row(self): self._fail_during_consume('invalid - no commit after first row') - def test_invalid_last_row_missing_commit(self): - self._fail_during_rows('invalid - last row missing commit') - def test_invalid_duplicate_row_key(self): self._fail_during_consume('invalid - duplicate row key') @@ -751,21 +735,44 @@ def test_invalid_reset_with_chunk(self): def test_invalid_commit_with_chunk(self): self._fail_during_consume('invalid - commit with chunk') + # JSON Error cases: incomplete final row + + def _sort_flattend_cells(self, flattened): + import operator + key_func = operator.itemgetter('rk', 'fm', 'qual') + return sorted(flattened, key=key_func) + + def _incomplete_final_row(self, testcase_name): + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) + expected_result = self._sort_flattend_cells( + [result for result in results if not result['error']]) + flattened = 
self._sort_flattend_cells(_flatten_cells(prd)) + self.assertEqual(flattened, expected_result) + + def test_invalid_no_commit(self): + self._incomplete_final_row('invalid - no commit') + + def test_invalid_last_row_missing_commit(self): + self._incomplete_final_row('invalid - last row missing commit') + # Non-error cases _marker = object() def _match_results(self, testcase_name, expected_result=_marker): - import operator - key_func = operator.itemgetter('rk', 'fm', 'qual') chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) prd = self._makeOne(iterator) prd.consume_next() - flattened = sorted(_flatten_cells(prd), key=key_func) + flattened = self._sort_flattend_cells(_flatten_cells(prd)) if expected_result is self._marker: - expected_result = sorted(results, key=key_func) + expected_result = self._sort_flattend_cells(results) self.assertEqual(flattened, expected_result) def test_bare_commit_implies_ts_zero(self): From 6b2f07f292e869d7619d17bee68ca6262658d7a6 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 11:34:20 -0400 Subject: [PATCH 025/103] Distinguish response-level errors from invalid chunks. Verify that completed, non-error rows match expected results after an invalid chunk testcase. --- gcloud/bigtable/row_data.py | 14 +++++++++----- gcloud/bigtable/test_row_data.py | 22 +++++++++++++--------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index b6cf55d98f0a..b6a52405f8b9 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -323,14 +323,18 @@ def consume_all(self, max_loops=None): break -class ReadRowsResponseError(RuntimeError): - """Exception raised to to invalid chunk / response data from back-end.""" +class InvalidReadRowsResponse(RuntimeError): + """Exception raised due to invalid response data from back-end.""" -def _raise_if(predicate): +class InvalidChunk(RuntimeError): + """Exception raised due to invalid chunk data from back-end.""" + + +def _raise_if(predicate, *args): """Helper for validation methods.""" if predicate: - raise ReadRowsResponseError() + raise InvalidChunk(*args) class PartialCellV2(object): @@ -533,7 +537,7 @@ def consume_next(self): if self._last_scanned_row_key is None: # first response if response.last_scanned_row_key: - raise ReadRowsResponseError() + raise InvalidReadRowsResponse() self._last_scanned_row_key = response.last_scanned_row_key diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index bfc660c8bded..2c3c9ba260f5 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -649,11 +649,11 @@ def test__save_row_no_cell(self): self.assertTrue(prd._rows[ROW_KEY] is row) def test_invalid_last_scanned_row_key_on_start(self): - from gcloud.bigtable.row_data import ReadRowsResponseError + from gcloud.bigtable.row_data import InvalidReadRowsResponse response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') iterator = _MockCancellableIterator(response) prd = self._makeOne(iterator) - with self.assertRaises(ReadRowsResponseError): + with self.assertRaises(InvalidReadRowsResponse): prd.consume_next() def test_valid_last_scanned_row_key_on_start(self): @@ -666,16 +666,16 @@ def test_valid_last_scanned_row_key_on_start(self): self.assertEqual(prd._last_scanned_row_key, 'AFTER') def test_invalid_empty_chunk(self): - from
gcloud.bigtable.row_data import InvalidChunk chunks = _generate_cell_chunks(['']) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) prd = self._makeOne(iterator) - with self.assertRaises(ReadRowsResponseError): + with self.assertRaises(InvalidChunk): prd.consume_next() def test_invalid_empty_second_chunk(self): - from gcloud.bigtable.row_data import ReadRowsResponseError + from gcloud.bigtable.row_data import InvalidChunk chunks = _generate_cell_chunks(['', '']) first = chunks[0] first.row_key = b'RK' @@ -684,19 +684,23 @@ def test_invalid_empty_second_chunk(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) prd = self._makeOne(iterator) - with self.assertRaises(ReadRowsResponseError): + with self.assertRaises(InvalidChunk): prd.consume_next() # JSON Error cases: invalid chunks def _fail_during_consume(self, testcase_name): - from gcloud.bigtable.row_data import ReadRowsResponseError - chunks, _ = self._load_json_test(testcase_name) + from gcloud.bigtable.row_data import InvalidChunk + chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) prd = self._makeOne(iterator) - with self.assertRaises(ReadRowsResponseError): + with self.assertRaises(InvalidChunk): prd.consume_next() + expected_result = self._sort_flattend_cells( + [result for result in results if not result['error']]) + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + self.assertEqual(flattened, expected_result) def test_invalid_no_cell_key_before_commit(self): self._fail_during_consume('invalid - no cell key before commit') From c542ff8c1eaa4023daf38562a969c238983414e7 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 27 Jun 2016 11:46:26 -0400 Subject: [PATCH 026/103] Add BigQuery and Pub/Sub. --- docs/logging-usage.rst | 44 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/docs/logging-usage.rst b/docs/logging-usage.rst index f59fe9b7948c..dd4bd36196e9 100644 --- a/docs/logging-usage.rst +++ b/docs/logging-usage.rst @@ -206,11 +206,8 @@ Delete a metric: False -Export log entries using sinks ------------------------------- - -Sinks allow exporting entries which match a given filter to Cloud Storage -buckets, BigQuery datasets, or Cloud Pub/Sub topics. +Export to Cloud storage +======================= Make sure that the storage bucket you want to export logs too has `cloud-logs@google.com` as the owner. See `Set permission for writing exported logs`_. @@ -222,12 +219,43 @@ Add `cloud-logs@google.com` as the owner of `my-bucket-name`: >>> from gcloud import storage >>> client = storage.Client() >>> bucket = client.get_bucket('my-bucket-name') - >>> acl = bucket.acl - >>> acl.user('cloud-logs@google.com').grant_owner() - >>> acl.save() + >>> bucket.acl.reload() + >>> logs_group = bucket.acl.group('cloud-logs@google.com') + >>> logs_group.grant_owner() + >>> bucket.acl.add_entity(logs_group) + >>> bucket.acl.save() .. _Set permission for writing exported logs: https://cloud.google.com/logging/docs/export/configure_export#setting_product_name_short_permissions_for_writing_exported_logs + +Export to BigQuery +================== + +To export logs to BigQuery you must log into the Cloud Platform Console +and add `cloud-logs@google.com` to your project. + +See: `Setting permissions for BigQuery`_ + +.. 
_Setting permissions for BigQuery: https://cloud.google.com/logging/docs/export/configure_export#manual-access-bq + + +Export to Pub/Sub +================= + +To export logs to Pub/Sub you must log into the Cloud Platform Console +and add `cloud-logs@google.com` to your project. + +See: `Setting permissions for Pub/Sub`_ + +.. _Setting permissions for Pub/Sub: https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub + + +Export log entries using sinks ------------------------------ Sinks allow exporting entries which match a given filter to Cloud Storage buckets, BigQuery datasets, or Cloud Pub/Sub topics. Create a Cloud Storage sink: .. doctest:: From 6e199fd5aaf49cb647d9d9bc90ecc111d08199a0 Mon Sep 17 00:00:00 2001 From: "Brian J. Watson" Date: Mon, 27 Jun 2016 08:47:34 -0700 Subject: [PATCH 027/103] Fix LICENSE to match http://www.apache.org/licenses/LICENSE-2.0.txt Specifically, fix the brackets. All other changes are just whitespace. --- LICENSE | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 4f73028109b8..d64569567334 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,5 @@ -Apache License + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -199,4 +200,3 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - From 3918082310f86d1494d63931e51f351fc5bc7b98 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 27 Jun 2016 11:53:26 -0400 Subject: [PATCH 028/103] Add example of fetching log entries. --- README.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.rst b/README.rst index faff3287a049..414d2dc53b3e 100644 --- a/README.rst +++ b/README.rst @@ -212,6 +212,13 @@ analyze, monitor, and alert on log data and events from Google Cloud Platform. logger = client.logger('log_name') logger.log_text("A simple entry") # API call +Example of fetching entries: + +.. code:: python + + entries, token = logger.list_entries() + for entry in entries: + print entry.payload See the ``gcloud-python`` API `logging documentation`_ to learn how to connect to Cloud logging using this Client Library. From 9ceac1e385e4e20f90ac1d774c89d053319f4fa1 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 27 Jun 2016 12:06:29 -0400 Subject: [PATCH 029/103] Add CSV and query example for BigQuery. --- README.rst | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/README.rst b/README.rst index b200cd009c63..0210ad7d32ce 100644 --- a/README.rst +++ b/README.rst @@ -174,12 +174,49 @@ append-only tables, using the processing power of Google's infrastructure. This package is still being implemented, but it is almost complete! +Load data from CSV +~~~~~~~~~~~~~~~~~~ + +..
code:: python + + import csv + from gcloud import bigquery + from gcloud.bigquery import SchemaField + client = bigquery.Client() + dataset = client.dataset('dataset_name') dataset.create() # API request + SCHEMA = [ + SchemaField('full_name', 'STRING', mode='required'), + SchemaField('age', 'INTEGER', mode='required'), + ] + table = dataset.table('table_name', SCHEMA) + table.create() + + with open('csv_file', 'rb') as readable: + table.upload_from_file( + readable, source_format='CSV', skip_leading_rows=1) + +Perform a synchronous query +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: python + + # Perform a synchronous query. + QUERY = ( + 'SELECT name FROM [bigquery-public-data:usa_names.usa_1910_2013] ' + 'WHERE state = "TX"') + query = client.run_sync_query('%s LIMIT 100' % QUERY) + query.timeout_ms = TIMEOUT_MS + query.run() + + for row in query.rows: + print row + + See the ``gcloud-python`` API `BigQuery documentation`_ to learn how to connect to BigQuery using this Client Library. From 8916b6c6a8d7968d8b365821d45f7eb6cbb3fb4b Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sun, 26 Jun 2016 20:19:41 -0400 Subject: [PATCH 030/103] Factor out generating RowFilter-related protobufs. --- gcloud/bigtable/test_row_filters.py | 230 +++++++++++----------------- 1 file changed, 92 insertions(+), 138 deletions(-) diff --git a/gcloud/bigtable/test_row_filters.py b/gcloud/bigtable/test_row_filters.py index 768ffb79bd32..1ea7ecb2fc62 100644 --- a/gcloud/bigtable/test_row_filters.py +++ b/gcloud/bigtable/test_row_filters.py @@ -60,13 +60,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(sink=flag) + expected_pb = _RowFilterPB(sink=flag) self.assertEqual(pb_val, expected_pb) @@ -80,13 +77,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(pass_all_filter=flag) + expected_pb = _RowFilterPB(pass_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -100,13 +94,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(block_all_filter=flag) + expected_pb = _RowFilterPB(block_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -159,13 +150,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - regex = b'row-key-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(row_key_regex_filter=regex) + expected_pb = _RowFilterPB(row_key_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -196,13 +184,10 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - sample = 0.25 row_filter = self._makeOne(sample) pb_val = row_filter.to_pb() - expected_pb 
= data_v1_pb2.RowFilter(row_sample_filter=sample) + expected_pb = _RowFilterPB(row_sample_filter=sample) self.assertEqual(pb_val, expected_pb) @@ -216,13 +201,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - regex = u'family-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(family_name_regex_filter=regex) + expected_pb = _RowFilterPB(family_name_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -236,13 +218,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - regex = b'column-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter( + expected_pb = _RowFilterPB( column_qualifier_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -288,9 +267,6 @@ def test___ne__same_value(self): def _to_pb_helper(self, start_micros=None, end_micros=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - pb_kwargs = {} start = None @@ -303,7 +279,7 @@ def _to_pb_helper(self, start_micros=None, end_micros=None): pb_kwargs['end_timestamp_micros'] = end_micros time_range = self._makeOne(start=start, end=end) - expected_pb = data_v1_pb2.TimestampRange(**pb_kwargs) + expected_pb = _TimestampRangePB(**pb_kwargs) self.assertEqual(time_range.to_pb(), expected_pb) def test_to_pb(self): @@ -351,15 +327,13 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import TimestampRange range_ = TimestampRange() row_filter = self._makeOne(range_) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter( - timestamp_range_filter=data_v1_pb2.TimestampRange()) + expected_pb = _RowFilterPB( + timestamp_range_filter=_TimestampRangePB()) self.assertEqual(pb_val, expected_pb) @@ -434,71 +408,56 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - column_family_id = u'column-family-id' row_filter = self._makeOne(column_family_id) - col_range_pb = data_v1_pb2.ColumnRange(family_name=column_family_id) - expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) + col_range_pb = _ColumnRangePB(family_name=column_family_id) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, start_column=column) - col_range_pb = data_v1_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, start_qualifier_inclusive=column, ) - expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - column_family_id = 
u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, start_column=column, inclusive_start=False) - col_range_pb = data_v1_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, start_qualifier_exclusive=column, ) - expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, end_column=column) - col_range_pb = data_v1_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, end_qualifier_inclusive=column, ) - expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, end_column=column, inclusive_end=False) - col_range_pb = data_v1_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, end_qualifier_exclusive=column, ) - expected_pb = data_v1_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -512,13 +471,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - regex = b'value-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(value_regex_filter=regex) + expected_pb = _RowFilterPB(value_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -579,52 +535,37 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - row_filter = self._makeOne() - expected_pb = data_v1_pb2.RowFilter( - value_range_filter=data_v1_pb2.ValueRange()) + expected_pb = _RowFilterPB( + value_range_filter=_ValueRangePB()) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - value = b'some-value' row_filter = self._makeOne(start_value=value) - val_range_pb = data_v1_pb2.ValueRange(start_value_inclusive=value) - expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(start_value_inclusive=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - value = b'some-value' row_filter = self._makeOne(start_value=value, inclusive_start=False) - val_range_pb = data_v1_pb2.ValueRange(start_value_exclusive=value) - expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(start_value_exclusive=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - from 
gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - value = b'some-value' row_filter = self._makeOne(end_value=value) - val_range_pb = data_v1_pb2.ValueRange(end_value_inclusive=value) - expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(end_value_inclusive=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - value = b'some-value' row_filter = self._makeOne(end_value=value, inclusive_end=False) - val_range_pb = data_v1_pb2.ValueRange(end_value_exclusive=value) - expected_pb = data_v1_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(end_value_exclusive=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -672,13 +613,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - num_cells = 76 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter( + expected_pb = _RowFilterPB( cells_per_row_offset_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -693,13 +631,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - num_cells = 189 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter( + expected_pb = _RowFilterPB( cells_per_row_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -714,13 +649,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - num_cells = 10 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter( + expected_pb = _RowFilterPB( cells_per_column_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -735,13 +667,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(strip_value_transformer=flag) + expected_pb = _RowFilterPB(strip_value_transformer=flag) self.assertEqual(pb_val, expected_pb) @@ -772,13 +701,10 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - label = u'label' row_filter = self._makeOne(label) pb_val = row_filter.to_pb() - expected_pb = data_v1_pb2.RowFilter(apply_label_transformer=label) + expected_pb = _RowFilterPB(apply_label_transformer=label) self.assertEqual(pb_val, expected_pb) @@ -823,8 +749,6 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -837,16 +761,14 @@ def test_to_pb(self): row_filter3 = 
self._makeOne(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() - expected_pb = data_v1_pb2.RowFilter( - chain=data_v1_pb2.RowFilter.Chain( + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB( filters=[row_filter1_pb, row_filter2_pb], ), ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import CellsRowLimitFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -863,8 +785,8 @@ def test_to_pb_nested(self): row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() - expected_pb = data_v1_pb2.RowFilter( - chain=data_v1_pb2.RowFilter.Chain( + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB( filters=[row_filter3_pb, row_filter4_pb], ), ) @@ -881,8 +803,6 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -895,16 +815,14 @@ def test_to_pb(self): row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() - expected_pb = data_v1_pb2.RowFilter( - interleave=data_v1_pb2.RowFilter.Interleave( + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB( filters=[row_filter1_pb, row_filter2_pb], ), ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import CellsRowLimitFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -921,8 +839,8 @@ def test_to_pb_nested(self): row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() - expected_pb = data_v1_pb2.RowFilter( - interleave=data_v1_pb2.RowFilter.Interleave( + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB( filters=[row_filter3_pb, row_filter4_pb], ), ) @@ -972,8 +890,6 @@ def test___eq__type_differ(self): self.assertNotEqual(cond_filter1, cond_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import CellsRowOffsetFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -991,8 +907,8 @@ def test_to_pb(self): false_filter=row_filter3) filter_pb = row_filter4.to_pb() - expected_pb = data_v1_pb2.RowFilter( - condition=data_v1_pb2.RowFilter.Condition( + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, false_filter=row_filter3_pb, @@ -1001,8 +917,6 @@ def test_to_pb(self): self.assertEqual(filter_pb, expected_pb) def test_to_pb_true_only(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -1015,8 +929,8 @@ def test_to_pb_true_only(self): row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2) filter_pb = row_filter3.to_pb() - expected_pb = data_v1_pb2.RowFilter( - condition=data_v1_pb2.RowFilter.Condition( + 
expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, ), @@ -1024,8 +938,6 @@ def test_to_pb_true_only(self): self.assertEqual(filter_pb, expected_pb) def test_to_pb_false_only(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -1038,10 +950,52 @@ def test_to_pb_false_only(self): row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2) filter_pb = row_filter3.to_pb() - expected_pb = data_v1_pb2.RowFilter( - condition=data_v1_pb2.RowFilter.Condition( + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( predicate_filter=row_filter1_pb, false_filter=row_filter2_pb, ), ) self.assertEqual(filter_pb, expected_pb) + + +def _ColumnRangePB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.ColumnRange(*args, **kw) + + +def _RowFilterPB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.RowFilter(*args, **kw) + + +def _RowFilterChainPB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.RowFilter.Chain(*args, **kw) + + +def _RowFilterConditionPB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.RowFilter.Condition(*args, **kw) + + +def _RowFilterInterleavePB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.RowFilter.Interleave(*args, **kw) + + +def _TimestampRangePB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.TimestampRange(*args, **kw) + + +def _ValueRangePB(*args, **kw): + from gcloud.bigtable._generated import ( + bigtable_data_pb2 as data_v1_pb2) + return data_v1_pb2.ValueRange(*args, **kw) From 1e4405eff5c4cc34870d5c6e0082728091631af0 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 27 Jun 2016 12:17:51 -0400 Subject: [PATCH 031/103] Add pubsub and bigquery code examples for logging. --- docs/logging-usage.rst | 45 +++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/docs/logging-usage.rst b/docs/logging-usage.rst index dd4bd36196e9..ec8f157cec5c 100644 --- a/docs/logging-usage.rst +++ b/docs/logging-usage.rst @@ -205,9 +205,14 @@ Delete a metric: >>> metric.exists() # API call False +Export log entries using sinks +------------------------------ + +Sinks allow exporting entries which match a given filter to Cloud Storage +buckets, BigQuery datasets, or Cloud Pub/Sub topics. Export to Cloud storage -======================= +~~~~~~~~~~~~~~~~~~~~~~~ Make sure that the storage bucket you want to export logs too has `cloud-logs@google.com` as the owner. See `Set permission for writing exported logs`_. @@ -227,34 +232,46 @@ Add `cloud-logs@google.com` as the owner of `my-bucket-name`: .. _Set permission for writing exported logs: https://cloud.google.com/logging/docs/export/configure_export#setting_product_name_short_permissions_for_writing_exported_logs - Export to BigQuery -================== +~~~~~~~~~~~~~~~~~~ To export logs to BigQuery you must log into the Cloud Platform Console -and add `cloud-logs@google.com` to your project. +and add `cloud-logs@google.com` to a dataset. 
 See: `Setting permissions for BigQuery`_

-.. _Setting permissions for BigQuery: https://cloud.google.com/logging/docs/export/configure_export#manual-access-bq
+.. doctest::
+
+    >>> from gcloud import bigquery
+    >>> from gcloud.bigquery.dataset import AccessGrant
+    >>> bigquery_client = bigquery.Client()
+    >>> dataset = bigquery_client.dataset('my-dataset-name')
+    >>> dataset.create()
+    >>> dataset.reload()
+    >>> grants = dataset.access_grants
+    >>> grants.append(AccessGrant(
+    ...     'WRITER', 'groupByEmail', 'cloud-logs@google.com'))
+    >>> dataset.access_grants = grants
+    >>> dataset.update()
+
+.. _Setting permissions for BigQuery: https://cloud.google.com/logging/docs/export/configure_export#manual-access-bq

 Export to Pub/Sub
-=================
+~~~~~~~~~~~~~~~~~

 To export logs to Cloud Pub/Sub you must log into the Cloud Platform Console
-and add `cloud-logs@google.com` to your project.
+and add `cloud-logs@google.com` to a topic.

 See: `Setting permissions for Pub/Sub`_

-.. _Setting permissions for Pub/Sub: https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub
-
-
-Export log entries using sinks
-------------------------------
+.. doctest::
+
+    >>> from gcloud import pubsub
+    >>> client = pubsub.Client()
+    >>> topic = client.topic('your-topic-name')
+    >>> policy = topic.get_iam_policy()
+    >>> policy.owners.add(policy.group('cloud-logs@google.com'))
+    >>> topic.set_iam_policy(policy)
+
-Sinks allow exporting entries which match a given filter to Cloud Storage
-buckets, BigQuery datasets, or Cloud Pub/Sub topics.
+.. _Setting permissions for Pub/Sub: https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub

 Create a Cloud Storage sink:

From 1b0fb536715a05aab0100b81d85e083e36df057d Mon Sep 17 00:00:00 2001
From: Tres Seaver
Date: Sun, 26 Jun 2016 23:50:50 -0400
Subject: [PATCH 032/103] Convert to non-instance-admin Bigtable V2 protos.

Folds new ReadRowsResponse logic from #1907, #1915 into table row handling.
---
 gcloud/bigtable/row.py | 40 +--
 gcloud/bigtable/row_data.py | 408 ++++++++++------------------
 gcloud/bigtable/row_filters.py | 114 ++++----
 gcloud/bigtable/table.py | 121 +++------
 gcloud/bigtable/test_row.py | 213 +++++++++------
 gcloud/bigtable/test_row_data.py | 328 +++-------------------
 gcloud/bigtable/test_row_filters.py | 58 ++--
 gcloud/bigtable/test_table.py | 264 +++++++++---------
 8 files changed, 593 insertions(+), 953 deletions(-)

diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py
index 1dbd38aa7962..aae048b0c7d6 100644
--- a/gcloud/bigtable/row.py
+++ b/gcloud/bigtable/row.py
@@ -22,10 +22,10 @@
 from gcloud._helpers import _datetime_from_microseconds
 from gcloud._helpers import _microseconds_from_datetime
 from gcloud._helpers import _to_bytes
-from gcloud.bigtable._generated import (
-    bigtable_data_pb2 as data_v1_pb2)
-from gcloud.bigtable._generated import (
-    bigtable_service_messages_pb2 as messages_v1_pb2)
+from gcloud.bigtable._generated_v2 import (
+    data_pb2 as data_v2_pb2)
+from gcloud.bigtable._generated_v2 import (
+    bigtable_pb2 as messages_v2_pb2)

 _PACK_I64 = struct.Struct('>q').pack

@@ -134,13 +134,13 @@ def _set_cell(self, column_family_id, column, value, timestamp=None,
             # Truncate to millisecond granularity.
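# [Editorial illustration, not part of the patch] The truncation applied on
# the next line keeps only whole milliseconds; for example, assuming a
# hypothetical input value:
#     timestamp_micros = 1468013712345678
#     timestamp_micros -= (timestamp_micros % 1000)   # -> 1468013712345000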
timestamp_micros -= (timestamp_micros % 1000) - mutation_val = data_v1_pb2.Mutation.SetCell( + mutation_val = data_v2_pb2.Mutation.SetCell( family_name=column_family_id, column_qualifier=column, timestamp_micros=timestamp_micros, value=value, ) - mutation_pb = data_v1_pb2.Mutation(set_cell=mutation_val) + mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) self._get_mutations(state).append(mutation_pb) def _delete(self, state=None): @@ -156,8 +156,8 @@ def _delete(self, state=None): :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. """ - mutation_val = data_v1_pb2.Mutation.DeleteFromRow() - mutation_pb = data_v1_pb2.Mutation(delete_from_row=mutation_val) + mutation_val = data_v2_pb2.Mutation.DeleteFromRow() + mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) self._get_mutations(state).append(mutation_pb) def _delete_cells(self, column_family_id, columns, time_range=None, @@ -188,10 +188,10 @@ def _delete_cells(self, column_family_id, columns, time_range=None, """ mutations_list = self._get_mutations(state) if columns is self.ALL_COLUMNS: - mutation_val = data_v1_pb2.Mutation.DeleteFromFamily( + mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( family_name=column_family_id, ) - mutation_pb = data_v1_pb2.Mutation(delete_from_family=mutation_val) + mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) mutations_list.append(mutation_pb) else: delete_kwargs = {} @@ -207,9 +207,9 @@ def _delete_cells(self, column_family_id, columns, time_range=None, family_name=column_family_id, column_qualifier=column, ) - mutation_val = data_v1_pb2.Mutation.DeleteFromColumn( + mutation_val = data_v2_pb2.Mutation.DeleteFromColumn( **delete_kwargs) - mutation_pb = data_v1_pb2.Mutation( + mutation_pb = data_v2_pb2.Mutation( delete_from_column=mutation_val) to_append.append(mutation_pb) @@ -389,7 +389,7 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total mutations exceed the maximum allowable ' '%d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_v1_pb2.MutateRowRequest( + request_pb = messages_v2_pb2.MutateRowRequest( table_name=self._table.name, row_key=self._row_key, mutations=mutations_list, @@ -504,14 +504,14 @@ def commit(self): 'mutations and %d false mutations.' % ( MAX_MUTATIONS, num_true_mutations, num_false_mutations)) - request_pb = messages_v1_pb2.CheckAndMutateRowRequest( + request_pb = messages_v2_pb2.CheckAndMutateRowRequest( table_name=self._table.name, row_key=self._row_key, predicate_filter=self._filter.to_pb(), true_mutations=true_mutations, false_mutations=false_mutations, ) - # We expect a `.messages_v1_pb2.CheckAndMutateRowResponse` + # We expect a `.messages_v2_pb2.CheckAndMutateRowResponse` client = self._table._cluster._client resp = client._data_stub.CheckAndMutateRow( request_pb, client.timeout_seconds) @@ -701,7 +701,7 @@ def append_cell_value(self, column_family_id, column, value): """ column = _to_bytes(column) value = _to_bytes(value) - rule_pb = data_v1_pb2.ReadModifyWriteRule( + rule_pb = data_v2_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, append_value=value) @@ -738,7 +738,7 @@ def increment_cell_value(self, column_family_id, column, int_value): will fail. 
""" column = _to_bytes(column) - rule_pb = data_v1_pb2.ReadModifyWriteRule( + rule_pb = data_v2_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, increment_amount=int_value) @@ -794,12 +794,12 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total append mutations exceed the maximum ' 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_v1_pb2.ReadModifyWriteRowRequest( + request_pb = messages_v2_pb2.ReadModifyWriteRowRequest( table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list, ) - # We expect a `.data_v1_pb2.Row` + # We expect a `.data_v2_pb2.Row` client = self._table._cluster._client row_response = client._data_stub.ReadModifyWriteRow( request_pb, client.timeout_seconds) @@ -814,7 +814,7 @@ def commit(self): def _parse_rmw_row_response(row_response): """Parses the response to a ``ReadModifyWriteRow`` request. - :type row_response: :class:`.data_v1_pb2.Row` + :type row_response: :class:`.data_v2_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index b6a52405f8b9..e353b8735ba9 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -67,6 +67,49 @@ def __ne__(self, other): return not self.__eq__(other) +class PartialCellData(object): + """Representation of partial cell in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) cell. + + :type family_name: str + :param family_name: The family name of the (partial) cell. + + :type qualifier: bytes + :param qualifier: The column qualifier of the (partial) cell. + + :type timestamp_micros: int + :param timestamp_micros: The timestamp (in microsecods) of the + (partial) cell. + + :type labels: list of str + :param labels: labels assigned to the (partial) cell + + :type value: bytes + :param value: The (accumulated) value of the (partial) cell. + """ + def __init__(self, row_key, family_name, qualifier, timestamp_micros, + labels=(), value=b''): + self.row_key = row_key + self.family_name = family_name + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels + self.value = value + + def append_value(self, value): + """Append bytes from a new chunk to value. + + :type value: bytes + :param value: bytes to append + """ + self.value += value + + class PartialRowData(object): """Representation of partial row in a Google Cloud Bigtable Table. @@ -80,15 +123,11 @@ class PartialRowData(object): def __init__(self, row_key): self._row_key = row_key self._cells = {} - self._committed = False - self._chunks_encountered = False def __eq__(self, other): if not isinstance(other, self.__class__): return False return (other._row_key == self._row_key and - other._committed == self._committed and - other._chunks_encountered == self._chunks_encountered and other._cells == self._cells) def __ne__(self, other): @@ -132,119 +171,13 @@ def row_key(self): """ return self._row_key - @property - def committed(self): - """Getter for the committed status of the (partial) row. - - :rtype: bool - :returns: The committed status of the (partial) row. 
- """ - return self._committed - def clear(self): - """Clears all cells that have been added.""" - self._committed = False - self._chunks_encountered = False - self._cells.clear() - - def _handle_commit_row(self, chunk, index, last_chunk_index): - """Handles a ``commit_row`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - - :type index: int - :param index: The current index of the chunk. - - :type last_chunk_index: int - :param last_chunk_index: The index of the last chunk. - - :raises: :class:`ValueError ` if the value of - ``commit_row`` is :data:`False` or if the chunk passed is not - the last chunk in a response. - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``commit_row``. - if not chunk.commit_row: - raise ValueError('Received commit_row that was False.') - - if index != last_chunk_index: - raise ValueError('Commit row chunk was not the last chunk') - else: - self._committed = True - - def _handle_reset_row(self, chunk): - """Handles a ``reset_row`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - - :raises: :class:`ValueError ` if the value of - ``reset_row`` is :data:`False` - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``reset_row``. - if not chunk.reset_row: - raise ValueError('Received reset_row that was False.') - - self.clear() - - def _handle_row_contents(self, chunk): - """Handles a ``row_contents`` chunk. +class InvalidReadRowsResponse(RuntimeError): + """Exception raised to to invalid response data from back-end.""" - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``row_contents``. - - # chunk.row_contents is ._generated.bigtable_data_pb2.Family - column_family_id = chunk.row_contents.name - column_family_dict = self._cells.setdefault(column_family_id, {}) - for column in chunk.row_contents.columns: - cells = [Cell.from_pb(cell) for cell in column.cells] - - column_name = column.qualifier - column_cells = column_family_dict.setdefault(column_name, []) - column_cells.extend(cells) - - def update_from_read_rows(self, read_rows_response_pb): - """Updates the current row from a ``ReadRows`` response. - - :type read_rows_response_pb: - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - :param read_rows_response_pb: A response streamed back as part of a - ``ReadRows`` request. - - :raises: :class:`ValueError ` if the current - partial row has already been committed, if the row key on the - response doesn't match the current one or if there is a chunk - encountered with an unexpected ``ONEOF`` protobuf property. - """ - if self._committed: - raise ValueError('The row has been committed') - - if read_rows_response_pb.row_key != self.row_key: - raise ValueError('Response row key (%r) does not match current ' - 'one (%r).' 
% (read_rows_response_pb.row_key, - self.row_key)) - - last_chunk_index = len(read_rows_response_pb.chunks) - 1 - for index, chunk in enumerate(read_rows_response_pb.chunks): - chunk_property = chunk.WhichOneof('chunk') - if chunk_property == 'row_contents': - self._handle_row_contents(chunk) - elif chunk_property == 'reset_row': - self._handle_reset_row(chunk) - elif chunk_property == 'commit_row': - self._handle_commit_row(chunk, index, last_chunk_index) - else: - # NOTE: This includes chunk_property == None since we always - # want a value to be set - raise ValueError('Unexpected chunk property: %s' % ( - chunk_property,)) - self._chunks_encountered = True +class InvalidChunk(RuntimeError): + """Exception raised to to invalid chunk data from back-end.""" class PartialRowsData(object): @@ -255,11 +188,27 @@ class PartialRowsData(object): :param response_iterator: A streaming iterator returned from a ``ReadRows`` request. """ + START = "Start" # No responses yet processed. + NEW_ROW = "New row" # No cells yet complete for row + ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row + CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row def __init__(self, response_iterator): - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` self._response_iterator = response_iterator + # Fully-processed rows, keyed by `row_key` self._rows = {} + # Counter for responses pulled from iterator + self._counter = 0 + # Maybe cached from previous response + self._last_scanned_row_key = None + # In-progress row, unset until first response, after commit/reset + self._row = None + # Last complete row, unset until first commit + self._previous_row = None + # In-progress cell, unset until first response, after completion + self._cell = None + # Last complete cell, unset until first completion, after new row + self._previous_cell = None def __eq__(self, other): if not isinstance(other, self.__class__): @@ -269,12 +218,32 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) + @property + def state(self): + """State machine state. + + :rtype: str + :returns: name of state corresponding to currrent row / chunk + processing. + """ + if self._last_scanned_row_key is None: + return self.START + if self._row is None: + assert self._cell is None + assert self._previous_cell is None + return self.NEW_ROW + if self._cell is not None: + return self.CELL_IN_PROGRESS + if self._previous_cell is not None: + return self.ROW_IN_PROGRESS + return self.NEW_ROW # row added, no chunk yet processed + @property def rows(self): """Property returning all rows accumulated from the stream. :rtype: dict - :returns: Dictionary of :class:`PartialRowData`. + :returns: row_key -> :class:`PartialRowData`. """ # NOTE: To avoid duplicating large objects, this is just the # mutable private data. @@ -285,21 +254,55 @@ def cancel(self): self._response_iterator.cancel() def consume_next(self): - """Consumes the next ``ReadRowsResponse`` from the stream. - - Parses the response and stores it as a :class:`PartialRowData` - in a dictionary owned by this object. + """Consume the next ``ReadRowsResponse`` from the stream. - :raises: :class:`StopIteration ` if the - response iterator has no more responses to stream. 
+ Parse the response and its chunks into a new/existing row in + :attr:`_rows` """ - read_rows_response = self._response_iterator.next() - row_key = read_rows_response.row_key - partial_row = self._rows.get(row_key) - if partial_row is None: - partial_row = self._rows[row_key] = PartialRowData(row_key) - # NOTE: This is not atomic in the case of failures. - partial_row.update_from_read_rows(read_rows_response) + response = six.next(self._response_iterator) + self._counter += 1 + + if self._last_scanned_row_key is None: # first response + if response.last_scanned_row_key: + raise InvalidReadRowsResponse() + + self._last_scanned_row_key = response.last_scanned_row_key + + row = self._row + cell = self._cell + + for chunk in response.chunks: + + self._validate_chunk(chunk) + + if chunk.reset_row: + row = self._row = None + cell = self._cell = self._previous_cell = None + continue + + if row is None: + row = self._row = PartialRowData(chunk.row_key) + + if cell is None: + cell = self._cell = PartialCellData( + chunk.row_key, + chunk.family_name.value, + chunk.qualifier.value, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(cell) + else: + cell.append_value(chunk.value) + + if chunk.commit_row: + self._save_current_row() + row = cell = None + continue + + if chunk.value_size == 0: + self._save_current_cell() + cell = None def consume_all(self, max_loops=None): """Consume the streamed responses until there are no more. @@ -322,100 +325,6 @@ def consume_all(self, max_loops=None): except StopIteration: break - -class InvalidReadRowsResponse(RuntimeError): - """Exception raised to to invalid response data from back-end.""" - - -class InvalidChunk(RuntimeError): - """Exception raised to to invalid chunk data from back-end.""" - - -def _raise_if(predicate, *args): - """Helper for validation methods.""" - if predicate: - raise InvalidChunk(*args) - - -class PartialCellV2(object): - """Data for a not-yet-complete cell.""" - - def __init__(self, row_key, family_name, qualifier, timestamp_micros, - labels=(), value=b''): - self.row_key = row_key - self.family_name = family_name - self.qualifier = qualifier - self.timestamp_micros = timestamp_micros - self.labels = labels - self.value = value - - def append_value(self, value): - """Append bytes from a new chunk to value. - - :type value: bytes - :param value: bytes to append - """ - self.value += value - - -class PartialRowsDataV2(object): - """Handle state involved in consuming a ``ReadRows`` streaming response. - - :type response_iterator: - :class:`grpc.framework.alpha._reexport._CancellableIterator` returning - :class:`gcloud.bigtable._generated_v2.bigtable_pb2.ReadRowsResponse` - :param response_iterator: - A streaming iterator returned from a ``ReadRows`` request. 
- """ - # State names - START = "Start" - NEW_ROW = "New row" - ROW_IN_PROGRESS = "Row in progress" - CELL_IN_PROGRESS = "Cell in progress" - - def __init__(self, response_iterator): - self._response_iterator = response_iterator - # Fully-processed rows, keyed by `row_key` - self._rows = {} - # Counter for responses pulled from iterator - self._counter = 0 - # Maybe cached from previous response - self._last_scanned_row_key = None - # In-progress row, unset until first response, after commit/reset - self._row = None - # Last complete row, unset until first commit - self._previous_row = None - # In-progress cell, unset until first response, after completion - self._cell = None - # Last complete cell, unset until first completion, after new row - self._previous_cell = None - - @property - def state(self): - """Name of state machine state.""" - if self._last_scanned_row_key is None: - return self.START - if self._row is None: - assert self._cell is None - assert self._previous_cell is None - return self.NEW_ROW - if self._cell is not None: - return self.CELL_IN_PROGRESS - if self._previous_cell is not None: - return self.ROW_IN_PROGRESS - return self.NEW_ROW # row added, no chunk yet processed - - @property - def rows(self): - """Property returning all rows accumulated from the stream. - - :rtype: dict - :returns: Dictionary of :class:`PartialRowData`. - """ - # NOTE: To avoid duplicating large objects, this is just the - # mutable private data. - return self._rows - @staticmethod def _validate_chunk_status(chunk): """Helper for :meth:`_validate_chunk_row_in_progress`, etc.""" @@ -526,53 +435,8 @@ def _save_current_row(self): self._row, self._previous_row = None, self._row self._previous_cell = None - def consume_next(self): - """Consume the next ``ReadRowsResponse`` from the stream. 
- - Parse the response and its chunks into a new/existing row in - :attr:`_rows` - """ - response = self._response_iterator.next() - self._counter += 1 - - if self._last_scanned_row_key is None: # first response - if response.last_scanned_row_key: - raise InvalidReadRowsResponse() - - self._last_scanned_row_key = response.last_scanned_row_key - - row = self._row - cell = self._cell - - for chunk in response.chunks: - - self._validate_chunk(chunk) - - if chunk.reset_row: - row = self._row = None - cell = self._cell = self._previous_cell = None - continue - - if row is None: - row = self._row = PartialRowData(chunk.row_key) - - if cell is None: - cell = self._cell = PartialCellV2( - chunk.row_key, - chunk.family_name.value, - chunk.qualifier.value, - chunk.timestamp_micros, - chunk.labels, - chunk.value) - self._copy_from_previous(cell) - else: - cell.append_value(chunk.value) - - if chunk.commit_row: - self._save_current_row() - row = cell = None - continue - if chunk.value_size == 0: - self._save_current_cell() - cell = None +def _raise_if(predicate, *args): + """Helper for validation methods.""" + if predicate: + raise InvalidChunk(*args) diff --git a/gcloud/bigtable/row_filters.py b/gcloud/bigtable/row_filters.py index 2b11a06bfdd9..f76615ba5ea8 100644 --- a/gcloud/bigtable/row_filters.py +++ b/gcloud/bigtable/row_filters.py @@ -17,8 +17,8 @@ from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) class RowFilter(object): @@ -66,10 +66,10 @@ class SinkFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(sink=self.flag) + return data_v2_pb2.RowFilter(sink=self.flag) class PassAllFilter(_BoolFilter): @@ -84,10 +84,10 @@ class PassAllFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(pass_all_filter=self.flag) + return data_v2_pb2.RowFilter(pass_all_filter=self.flag) class BlockAllFilter(_BoolFilter): @@ -101,10 +101,10 @@ class BlockAllFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(block_all_filter=self.flag) + return data_v2_pb2.RowFilter(block_all_filter=self.flag) class _RegexFilter(RowFilter): @@ -154,10 +154,10 @@ class RowKeyRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(row_key_regex_filter=self.regex) + return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex) class RowSampleFilter(RowFilter): @@ -179,10 +179,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. 
""" - return data_v1_pb2.RowFilter(row_sample_filter=self.sample) + return data_v2_pb2.RowFilter(row_sample_filter=self.sample) class FamilyNameRegexFilter(_RegexFilter): @@ -203,10 +203,10 @@ class FamilyNameRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(family_name_regex_filter=self.regex) + return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex) class ColumnQualifierRegexFilter(_RegexFilter): @@ -233,10 +233,10 @@ class ColumnQualifierRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(column_qualifier_regex_filter=self.regex) + return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex) class TimestampRange(object): @@ -267,7 +267,7 @@ def __ne__(self, other): def to_pb(self): """Converts the :class:`TimestampRange` to a protobuf. - :rtype: :class:`.data_v1_pb2.TimestampRange` + :rtype: :class:`.data_v2_pb2.TimestampRange` :returns: The converted current object. """ timestamp_range_kwargs = {} @@ -277,7 +277,7 @@ def to_pb(self): if self.end is not None: timestamp_range_kwargs['end_timestamp_micros'] = ( _microseconds_from_datetime(self.end)) - return data_v1_pb2.TimestampRange(**timestamp_range_kwargs) + return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) class TimestampRangeFilter(RowFilter): @@ -301,10 +301,10 @@ def to_pb(self): First converts the ``range_`` on the current object to a protobuf and then uses it in the ``timestamp_range_filter`` field. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter( + return data_v2_pb2.RowFilter( timestamp_range_filter=self.range_.to_pb()) @@ -377,28 +377,28 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - First converts to a :class:`.data_v1_pb2.ColumnRange` and then uses it + First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it in the ``column_range_filter`` field. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ column_range_kwargs = {'family_name': self.column_family_id} if self.start_column is not None: if self.inclusive_start: - key = 'start_qualifier_inclusive' + key = 'start_qualifier_closed' else: - key = 'start_qualifier_exclusive' + key = 'start_qualifier_open' column_range_kwargs[key] = _to_bytes(self.start_column) if self.end_column is not None: if self.inclusive_end: - key = 'end_qualifier_inclusive' + key = 'end_qualifier_closed' else: - key = 'end_qualifier_exclusive' + key = 'end_qualifier_open' column_range_kwargs[key] = _to_bytes(self.end_column) - column_range = data_v1_pb2.ColumnRange(**column_range_kwargs) - return data_v1_pb2.RowFilter(column_range_filter=column_range) + column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) + return data_v2_pb2.RowFilter(column_range_filter=column_range) class ValueRegexFilter(_RegexFilter): @@ -425,10 +425,10 @@ class ValueRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. 
""" - return data_v1_pb2.RowFilter(value_regex_filter=self.regex) + return data_v2_pb2.RowFilter(value_regex_filter=self.regex) class ValueRangeFilter(RowFilter): @@ -494,28 +494,28 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - First converts to a :class:`.data_v1_pb2.ValueRange` and then uses + First converts to a :class:`.data_v2_pb2.ValueRange` and then uses it to create a row filter protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ value_range_kwargs = {} if self.start_value is not None: if self.inclusive_start: - key = 'start_value_inclusive' + key = 'start_value_closed' else: - key = 'start_value_exclusive' + key = 'start_value_open' value_range_kwargs[key] = _to_bytes(self.start_value) if self.end_value is not None: if self.inclusive_end: - key = 'end_value_inclusive' + key = 'end_value_closed' else: - key = 'end_value_exclusive' + key = 'end_value_open' value_range_kwargs[key] = _to_bytes(self.end_value) - value_range = data_v1_pb2.ValueRange(**value_range_kwargs) - return data_v1_pb2.RowFilter(value_range_filter=value_range) + value_range = data_v2_pb2.ValueRange(**value_range_kwargs) + return data_v2_pb2.RowFilter(value_range_filter=value_range) class _CellCountFilter(RowFilter): @@ -547,10 +547,10 @@ class CellsRowOffsetFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter( + return data_v2_pb2.RowFilter( cells_per_row_offset_filter=self.num_cells) @@ -564,10 +564,10 @@ class CellsRowLimitFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) + return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) class CellsColumnLimitFilter(_CellCountFilter): @@ -582,10 +582,10 @@ class CellsColumnLimitFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter( + return data_v2_pb2.RowFilter( cells_per_column_limit_filter=self.num_cells) @@ -601,10 +601,10 @@ class StripValueTransformerFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(strip_value_transformer=self.flag) + return data_v2_pb2.RowFilter(strip_value_transformer=self.flag) class ApplyLabelFilter(RowFilter): @@ -637,10 +637,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v1_pb2.RowFilter(apply_label_transformer=self.label) + return data_v2_pb2.RowFilter(apply_label_transformer=self.label) class _FilterCombination(RowFilter): @@ -679,12 +679,12 @@ class RowFilterChain(_FilterCombination): def to_pb(self): """Converts the row filter to a protobuf. 
- :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - chain = data_v1_pb2.RowFilter.Chain( + chain = data_v2_pb2.RowFilter.Chain( filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_v1_pb2.RowFilter(chain=chain) + return data_v2_pb2.RowFilter(chain=chain) class RowFilterUnion(_FilterCombination): @@ -703,12 +703,12 @@ class RowFilterUnion(_FilterCombination): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - interleave = data_v1_pb2.RowFilter.Interleave( + interleave = data_v2_pb2.RowFilter.Interleave( filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_v1_pb2.RowFilter(interleave=interleave) + return data_v2_pb2.RowFilter(interleave=interleave) class ConditionalRowFilter(RowFilter): @@ -756,7 +756,7 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_v1_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} @@ -764,5 +764,5 @@ def to_pb(self): condition_kwargs['true_filter'] = self.true_filter.to_pb() if self.false_filter is not None: condition_kwargs['false_filter'] = self.false_filter.to_pb() - condition = data_v1_pb2.RowFilter.Condition(**condition_kwargs) - return data_v1_pb2.RowFilter(condition=condition) + condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) + return data_v2_pb2.RowFilter(condition=condition) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 155b5123c67f..83182d9f2a04 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -14,20 +14,16 @@ """User friendly container for Google Cloud Bigtable Table.""" - from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as data_messages_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as data_messages_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) from gcloud.bigtable.column_family import _gc_rule_from_pb from gcloud.bigtable.column_family import ColumnFamily from gcloud.bigtable.row import AppendRow from gcloud.bigtable.row import ConditionalRow from gcloud.bigtable.row import DirectRow -from gcloud.bigtable.row_data import PartialRowData from gcloud.bigtable.row_data import PartialRowsData @@ -168,8 +164,12 @@ def create(self, initial_split_keys=None): created, spanning the key ranges: ``[, s1)``, ``[s1, s2)``, ``[s2, )``. 
""" - request_pb = messages_v1_pb2.CreateTableRequest( - initial_split_keys=initial_split_keys or [], + split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split + if initial_split_keys is not None: + initial_split_keys = [ + split_pb(key=key) for key in initial_split_keys] + request_pb = table_admin_messages_v2_pb2.CreateTableRequest( + initial_splits=initial_split_keys or [], name=self._cluster.name, table_id=self.table_id, ) @@ -179,7 +179,8 @@ def create(self, initial_split_keys=None): def delete(self): """Delete this table.""" - request_pb = messages_v1_pb2.DeleteTableRequest(name=self.name) + request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( + name=self.name) client = self._cluster._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.DeleteTable(request_pb, client.timeout_seconds) @@ -195,7 +196,8 @@ def list_column_families(self): family name from the response does not agree with the computed name from the column family ID. """ - request_pb = messages_v1_pb2.GetTableRequest(name=self.name) + request_pb = table_admin_messages_v2_pb2.GetTableRequest( + name=self.name) client = self._cluster._client # We expect a `._generated.bigtable_table_data_pb2.Table` table_pb = client._table_stub.GetTable(request_pb, @@ -206,10 +208,6 @@ def list_column_families(self): gc_rule = _gc_rule_from_pb(value_pb.gc_rule) column_family = self.column_family(column_family_id, gc_rule=gc_rule) - if column_family.name != value_pb.name: - raise ValueError('Column family name %s does not agree with ' - 'name from request: %s.' % ( - column_family.name, value_pb.name)) result[column_family_id] = column_family return result @@ -234,21 +232,18 @@ def read_row(self, row_key, filter_=None): client = self._cluster._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) - # We expect an iterator of `data_messages_v1_pb2.ReadRowsResponse` - result = PartialRowData(row_key) - for read_rows_response in response_iterator: - result.update_from_read_rows(read_rows_response) + rows_data = PartialRowsData(response_iterator) + rows_data.consume_all() + if rows_data.state != rows_data.NEW_ROW: + raise ValueError('The row remains partial / is not committed.') - # Make sure the result actually contains data. - if not result._chunks_encountered: + if len(rows_data.rows) == 0: return None - # Make sure the result was committed by the back-end. - if not result.committed: - raise ValueError('The row remains partial / is not committed.') - return result - def read_rows(self, start_key=None, end_key=None, - allow_row_interleaving=None, limit=None, filter_=None): + return rows_data.rows[row_key] + + def read_rows(self, start_key=None, end_key=None, limit=None, + filter_=None): """Read rows from this table. :type start_key: bytes @@ -261,26 +256,10 @@ def read_rows(self, start_key=None, end_key=None, The range will not include ``end_key``. If left empty, will be interpreted as an infinite string. - :type allow_row_interleaving: bool - :param allow_row_interleaving: (Optional) By default, rows are read - sequentially, producing results which - are guaranteed to arrive in increasing - row order. Setting - ``allow_row_interleaving`` to - :data:`True` allows multiple rows to be - interleaved in the response stream, - which increases throughput but breaks - this guarantee, and may force the - client to use more memory to buffer - partially-received rows. - :type limit: int :param limit: (Optional) The read will terminate after committing to N rows' worth of results. 
The default (zero) is to return - all results. Note that if ``allow_row_interleaving`` is - set to :data:`True`, partial results may be returned for - more than N rows. However, only N ``commit_row`` chunks - will be sent. + all results. :type filter_: :class:`.RowFilter` :param filter_: (Optional) The filter to apply to the contents of the @@ -293,11 +272,11 @@ def read_rows(self, start_key=None, end_key=None, """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, - allow_row_interleaving=allow_row_interleaving, limit=limit) + limit=limit) client = self._cluster._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) - # We expect an iterator of `data_messages_v1_pb2.ReadRowsResponse` + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` return PartialRowsData(response_iterator) def sample_row_keys(self): @@ -331,7 +310,7 @@ def sample_row_keys(self): or by casting to a :class:`list` and can be cancelled by calling ``cancel()``. """ - request_pb = data_messages_v1_pb2.SampleRowKeysRequest( + request_pb = data_messages_v2_pb2.SampleRowKeysRequest( table_name=self.name) client = self._cluster._client response_iterator = client._data_stub.SampleRowKeys( @@ -340,7 +319,7 @@ def sample_row_keys(self): def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, allow_row_interleaving=None, limit=None): + filter_=None, limit=None): """Creates a request to read rows in a table. :type table_name: str @@ -363,28 +342,12 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, :param filter_: (Optional) The filter to apply to the contents of the specified row(s). If unset, reads the entire table. - :type allow_row_interleaving: bool - :param allow_row_interleaving: (Optional) By default, rows are read - sequentially, producing results which are - guaranteed to arrive in increasing row - order. Setting - ``allow_row_interleaving`` to - :data:`True` allows multiple rows to be - interleaved in the response stream, - which increases throughput but breaks - this guarantee, and may force the - client to use more memory to buffer - partially-received rows. - :type limit: int :param limit: (Optional) The read will terminate after committing to N rows' worth of results. The default (zero) is to return - all results. Note that if ``allow_row_interleaving`` is - set to :data:`True`, partial results may be returned for - more than N rows. However, only N ``commit_row`` chunks - will be sent. + all results. - :rtype: :class:`data_messages_v1_pb2.ReadRowsRequest` + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
:raises: :class:`ValueError ` if both ``row_key`` and one of ``start_key`` and ``end_key`` are set @@ -394,21 +357,23 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, (start_key is not None or end_key is not None)): raise ValueError('Row key and row range cannot be ' 'set simultaneously') - if row_key is not None: - request_kwargs['row_key'] = _to_bytes(row_key) + range_kwargs = {} if start_key is not None or end_key is not None: - range_kwargs = {} if start_key is not None: - range_kwargs['start_key'] = _to_bytes(start_key) + range_kwargs['start_key_closed'] = _to_bytes(start_key) if end_key is not None: - range_kwargs['end_key'] = _to_bytes(end_key) - row_range = data_v1_pb2.RowRange(**range_kwargs) - request_kwargs['row_range'] = row_range + range_kwargs['end_key_open'] = _to_bytes(end_key) if filter_ is not None: request_kwargs['filter'] = filter_.to_pb() - if allow_row_interleaving is not None: - request_kwargs['allow_row_interleaving'] = allow_row_interleaving if limit is not None: - request_kwargs['num_rows_limit'] = limit + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) - return data_messages_v1_pb2.ReadRowsRequest(**request_kwargs) + return message diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py index e2336d7520f3..2cc7630758d2 100644 --- a/gcloud/bigtable/test_row.py +++ b/gcloud/bigtable/test_row.py @@ -75,9 +75,6 @@ def _set_cell_helper(self, column=None, column_bytes=None, timestamp_micros=-1): import six import struct - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - row_key = b'row_key' column_family_id = u'column_family_id' if column is None: @@ -90,8 +87,8 @@ def _set_cell_helper(self, column=None, column_bytes=None, if isinstance(value, six.integer_types): value = struct.pack('>q', value) - expected_pb = data_v1_pb2.Mutation( - set_cell=data_v1_pb2.Mutation.SetCell( + expected_pb = _MutationPB( + set_cell=_MutationSetCellPB( family_name=column_family_id, column_qualifier=column_bytes or column, timestamp_micros=timestamp_micros, @@ -135,16 +132,13 @@ def test_set_cell_with_non_null_timestamp(self): timestamp_micros=millis_granularity) def test_delete(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - row_key = b'row_key' row = self._makeOne(row_key, object()) self.assertEqual(row._pb_mutations, []) row.delete() - expected_pb = data_v1_pb2.Mutation( - delete_from_row=data_v1_pb2.Mutation.DeleteFromRow(), + expected_pb = _MutationPB( + delete_from_row=_MutationDeleteFromRowPB(), ) self.assertEqual(row._pb_mutations, [expected_pb]) @@ -195,9 +189,6 @@ def test_delete_cells_non_iterable(self): row.delete_cells(column_family_id, columns) def test_delete_cells_all_columns(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - row_key = b'row_key' column_family_id = u'column_family_id' table = object() @@ -207,8 +198,8 @@ def test_delete_cells_all_columns(self): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, klass.ALL_COLUMNS) - expected_pb = data_v1_pb2.Mutation( - delete_from_family=data_v1_pb2.Mutation.DeleteFromFamily( + expected_pb = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB( family_name=column_family_id, ), ) @@ -226,9 +217,6 @@ def test_delete_cells_no_columns(self): 
self.assertEqual(row._pb_mutations, []) def _delete_cells_helper(self, time_range=None): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - row_key = b'row_key' column = b'column' column_family_id = u'column_family_id' @@ -239,8 +227,8 @@ def _delete_cells_helper(self, time_range=None): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns, time_range=time_range) - expected_pb = data_v1_pb2.Mutation( - delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( + expected_pb = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id, column_qualifier=column, ), @@ -279,9 +267,6 @@ def test_delete_cells_with_bad_column(self): self.assertEqual(row._pb_mutations, []) def test_delete_cells_with_string_columns(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - row_key = b'row_key' column_family_id = u'column_family_id' column1 = u'column1' @@ -295,14 +280,14 @@ def test_delete_cells_with_string_columns(self): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns) - expected_pb1 = data_v1_pb2.Mutation( - delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( + expected_pb1 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id, column_qualifier=column1_bytes, ), ) - expected_pb2 = data_v1_pb2.Mutation( - delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( + expected_pb2 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id, column_qualifier=column2_bytes, ), @@ -311,10 +296,6 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub row_key = b'row_key' @@ -328,15 +309,15 @@ def test_commit(self): # Create request_pb value = b'bytes-value' - mutation = data_v1_pb2.Mutation( - set_cell=data_v1_pb2.Mutation.SetCell( + mutation = _MutationPB( + set_cell=_MutationSetCellPB( family_name=column_family_id, column_qualifier=column, timestamp_micros=-1, # Default value. value=value, ), ) - request_pb = messages_v1_pb2.MutateRowRequest( + request_pb = _MutateRowRequestPB( table_name=table_name, row_key=row_key, mutations=[mutation], @@ -427,10 +408,6 @@ def test__get_mutations(self): self.assertTrue(false_mutations is row._get_mutations(None)) def test_commit(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.row_filters import RowSampleFilter @@ -449,29 +426,29 @@ def test_commit(self): # Create request_pb value1 = b'bytes-value' - mutation1 = data_v1_pb2.Mutation( - set_cell=data_v1_pb2.Mutation.SetCell( + mutation1 = _MutationPB( + set_cell=_MutationSetCellPB( family_name=column_family_id1, column_qualifier=column1, timestamp_micros=-1, # Default value. 
value=value1, ), ) - mutation2 = data_v1_pb2.Mutation( - delete_from_row=data_v1_pb2.Mutation.DeleteFromRow(), + mutation2 = _MutationPB( + delete_from_row=_MutationDeleteFromRowPB(), ) - mutation3 = data_v1_pb2.Mutation( - delete_from_column=data_v1_pb2.Mutation.DeleteFromColumn( + mutation3 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id2, column_qualifier=column2, ), ) - mutation4 = data_v1_pb2.Mutation( - delete_from_family=data_v1_pb2.Mutation.DeleteFromFamily( + mutation4 = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB( family_name=column_family_id3, ), ) - request_pb = messages_v1_pb2.CheckAndMutateRowRequest( + request_pb = _CheckAndMutateRowRequestPB( table_name=table_name, row_key=row_key, predicate_filter=row_filter.to_pb(), @@ -481,7 +458,7 @@ def test_commit(self): # Create response_pb predicate_matched = True - response_pb = messages_v1_pb2.CheckAndMutateRowResponse( + response_pb = _CheckAndMutateRowResponsePB( predicate_matched=predicate_matched) # Patch the stub used by the API method. @@ -567,9 +544,6 @@ def test_clear(self): self.assertEqual(row._rule_pb_list, []) def test_append_cell_value(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - table = object() row_key = b'row_key' row = self._makeOne(row_key, table) @@ -579,15 +553,12 @@ def test_append_cell_value(self): column_family_id = u'column_family_id' value = b'bytes-val' row.append_cell_value(column_family_id, column, value) - expected_pb = data_v1_pb2.ReadModifyWriteRule( + expected_pb = _ReadModifyWriteRulePB( family_name=column_family_id, column_qualifier=column, append_value=value) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_increment_cell_value(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - table = object() row_key = b'row_key' row = self._makeOne(row_key, table) @@ -597,17 +568,13 @@ def test_increment_cell_value(self): column_family_id = u'column_family_id' int_value = 281330 row.increment_cell_value(column_family_id, column, int_value) - expected_pb = data_v1_pb2.ReadModifyWriteRule( + expected_pb = _ReadModifyWriteRulePB( family_name=column_family_id, column_qualifier=column, increment_amount=int_value) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_commit(self): from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import row as MUT @@ -623,11 +590,11 @@ def test_commit(self): # Create request_pb value = b'bytes-value' # We will call row.append_cell_value(COLUMN_FAMILY_ID, COLUMN, value). 
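# [Editorial sketch, not part of the patch] The calling pattern this test
# exercises, assuming an existing `table`; the row key, family, column, and
# value names below are illustrative only:
#
#     row = AppendRow(b'row_key', table)
#     row.append_cell_value(u'column_family_id', b'column', b'bytes-value')
#     modified_cells = row.commit()   # API request; returns modified cells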
- request_pb = messages_v1_pb2.ReadModifyWriteRowRequest( + request_pb = _ReadModifyWriteRowRequestPB( table_name=table_name, row_key=row_key, rules=[ - data_v1_pb2.ReadModifyWriteRule( + _ReadModifyWriteRulePB( family_name=column_family_id, column_qualifier=column, append_value=value, @@ -703,9 +670,6 @@ def _callFUT(self, row_response): def test_it(self): from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - col_fam1 = u'col-fam-id' col_fam2 = u'col-fam-id2' col_name1 = b'col-name1' @@ -734,28 +698,28 @@ def test_it(self): ], }, } - sample_input = data_v1_pb2.Row( + sample_input = _RowPB( families=[ - data_v1_pb2.Family( + _FamilyPB( name=col_fam1, columns=[ - data_v1_pb2.Column( + _ColumnPB( qualifier=col_name1, cells=[ - data_v1_pb2.Cell( + _CellPB( value=cell_val1, timestamp_micros=microseconds, ), - data_v1_pb2.Cell( + _CellPB( value=cell_val2, timestamp_micros=microseconds, ), ], ), - data_v1_pb2.Column( + _ColumnPB( qualifier=col_name2, cells=[ - data_v1_pb2.Cell( + _CellPB( value=cell_val3, timestamp_micros=microseconds, ), @@ -763,13 +727,13 @@ def test_it(self): ), ], ), - data_v1_pb2.Family( + _FamilyPB( name=col_fam2, columns=[ - data_v1_pb2.Column( + _ColumnPB( qualifier=col_name3, cells=[ - data_v1_pb2.Cell( + _CellPB( value=cell_val4, timestamp_micros=microseconds, ), @@ -790,9 +754,6 @@ def _callFUT(self, family_pb): def test_it(self): from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - col_fam1 = u'col-fam-id' col_name1 = b'col-name1' col_name2 = b'col-name2' @@ -812,26 +773,26 @@ def test_it(self): ], } expected_output = (col_fam1, expected_dict) - sample_input = data_v1_pb2.Family( + sample_input = _FamilyPB( name=col_fam1, columns=[ - data_v1_pb2.Column( + _ColumnPB( qualifier=col_name1, cells=[ - data_v1_pb2.Cell( + _CellPB( value=cell_val1, timestamp_micros=microseconds, ), - data_v1_pb2.Cell( + _CellPB( value=cell_val2, timestamp_micros=microseconds, ), ], ), - data_v1_pb2.Column( + _ColumnPB( qualifier=col_name2, cells=[ - data_v1_pb2.Cell( + _CellPB( value=cell_val3, timestamp_micros=microseconds, ), @@ -842,6 +803,90 @@ def test_it(self): self.assertEqual(expected_output, self._callFUT(sample_input)) +def _CheckAndMutateRowRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw) + + +def _CheckAndMutateRowResponsePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) + + +def _MutateRowRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.MutateRowRequest(*args, **kw) + + +def _ReadModifyWriteRowRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) + + +def _CellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) + + +def _ColumnPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Column(*args, **kw) + + +def _FamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Family(*args, **kw) + + +def 
_MutationPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation(*args, **kw) + + +def _MutationSetCellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.SetCell(*args, **kw) + + +def _MutationDeleteFromColumnPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) + + +def _MutationDeleteFromFamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) + + +def _MutationDeleteFromRowPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) + + +def _RowPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Row(*args, **kw) + + +def _ReadModifyWriteRulePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ReadModifyWriteRule(*args, **kw) + + class _Client(object): data_stub = None diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 2c3c9ba260f5..6fae4d18c40b 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -105,8 +105,6 @@ def test_constructor(self): partial_row_data = self._makeOne(row_key) self.assertTrue(partial_row_data._row_key is row_key) self.assertEqual(partial_row_data._cells, {}) - self.assertFalse(partial_row_data._committed) - self.assertFalse(partial_row_data._chunks_encountered) def test___eq__(self): row_key = object() @@ -133,13 +131,6 @@ def test___ne__(self): partial_row_data2 = self._makeOne(row_key2) self.assertNotEqual(partial_row_data1, partial_row_data2) - def test___ne__committed(self): - row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data1._committed = object() - partial_row_data2 = self._makeOne(row_key) - self.assertNotEqual(partial_row_data1, partial_row_data2) - def test___ne__cells(self): row_key = object() partial_row_data1 = self._makeOne(row_key) @@ -190,202 +181,6 @@ def test_row_key_getter(self): partial_row_data = self._makeOne(row_key) self.assertTrue(partial_row_data.row_key is row_key) - def test_committed_getter(self): - partial_row_data = self._makeOne(None) - partial_row_data._committed = value = object() - self.assertTrue(partial_row_data.committed is value) - - def test_clear(self): - partial_row_data = self._makeOne(None) - cells = {1: 2} - partial_row_data._cells = cells - self.assertEqual(partial_row_data.cells, cells) - partial_row_data._committed = True - partial_row_data._chunks_encountered = True - partial_row_data.clear() - self.assertFalse(partial_row_data.committed) - self.assertFalse(partial_row_data._chunks_encountered) - self.assertEqual(partial_row_data.cells, {}) - - def test__handle_commit_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) - - index = last_chunk_index = 1 - self.assertFalse(partial_row_data.committed) - partial_row_data._handle_commit_row(chunk, index, last_chunk_index) - self.assertTrue(partial_row_data.committed) - - def test__handle_commit_row_false(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as 
messages_v1_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=False) - - with self.assertRaises(ValueError): - partial_row_data._handle_commit_row(chunk, None, None) - - def test__handle_commit_row_not_last_chunk(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) - - with self.assertRaises(ValueError): - index = 0 - last_chunk_index = 1 - self.assertNotEqual(index, last_chunk_index) - partial_row_data._handle_commit_row( - chunk, index, last_chunk_index) - - def test__handle_reset_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=True) - - # Modify the PartialRowData object so we can check it's been cleared. - partial_row_data._cells = {1: 2} - partial_row_data._committed = True - partial_row_data._handle_reset_row(chunk) - self.assertEqual(partial_row_data.cells, {}) - self.assertFalse(partial_row_data.committed) - - def test__handle_reset_row_failure(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=False) - - with self.assertRaises(ValueError): - partial_row_data._handle_reset_row(chunk) - - def test__handle_row_contents(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable.row_data import Cell - - partial_row_data = self._makeOne(None) - cell1_pb = data_v1_pb2.Cell(timestamp_micros=1, value=b'val1') - cell2_pb = data_v1_pb2.Cell(timestamp_micros=200, value=b'val2') - cell3_pb = data_v1_pb2.Cell(timestamp_micros=300000, value=b'val3') - col1 = b'col1' - col2 = b'col2' - columns = [ - data_v1_pb2.Column(qualifier=col1, cells=[cell1_pb, cell2_pb]), - data_v1_pb2.Column(qualifier=col2, cells=[cell3_pb]), - ] - family_name = u'name' - row_contents = data_v1_pb2.Family(name=family_name, columns=columns) - chunk = messages_v1_pb2.ReadRowsResponse.Chunk( - row_contents=row_contents) - - self.assertEqual(partial_row_data.cells, {}) - partial_row_data._handle_row_contents(chunk) - expected_cells = { - family_name: { - col1: [Cell.from_pb(cell1_pb), Cell.from_pb(cell2_pb)], - col2: [Cell.from_pb(cell3_pb)], - } - } - self.assertEqual(partial_row_data.cells, expected_cells) - - def test_update_from_read_rows(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - row_key = b'row-key' - partial_row_data = self._makeOne(row_key) - - # Set-up chunk1, some data that will be reset by chunk2. - ignored_family_name = u'ignore-name' - row_contents = data_v1_pb2.Family(name=ignored_family_name) - chunk1 = messages_v1_pb2.ReadRowsResponse.Chunk( - row_contents=row_contents) - - # Set-up chunk2, a reset row. - chunk2 = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=True) - - # Set-up chunk3, a column family with no columns. - family_name = u'name' - row_contents = data_v1_pb2.Family(name=family_name) - chunk3 = messages_v1_pb2.ReadRowsResponse.Chunk( - row_contents=row_contents) - - # Set-up chunk4, a commit row. 
- chunk4 = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) - - # Prepare request and make sure PartialRowData is empty before. - read_rows_response_pb = messages_v1_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk1, chunk2, chunk3, chunk4]) - self.assertEqual(partial_row_data.cells, {}) - self.assertFalse(partial_row_data.committed) - self.assertFalse(partial_row_data._chunks_encountered) - - # Parse the response and make sure the cells took place. - partial_row_data.update_from_read_rows(read_rows_response_pb) - self.assertEqual(partial_row_data.cells, {family_name: {}}) - self.assertFalse(ignored_family_name in partial_row_data.cells) - self.assertTrue(partial_row_data.committed) - self.assertTrue(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_while_committed(self): - partial_row_data = self._makeOne(None) - partial_row_data._committed = True - self.assertFalse(partial_row_data._chunks_encountered) - - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(None) - - self.assertFalse(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_row_key_disagree(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - row_key1 = b'row-key1' - row_key2 = b'row-key2' - partial_row_data = self._makeOne(row_key1) - self.assertFalse(partial_row_data._chunks_encountered) - - self.assertNotEqual(row_key1, row_key2) - read_rows_response_pb = messages_v1_pb2.ReadRowsResponse( - row_key=row_key2) - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(read_rows_response_pb) - - self.assertFalse(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_empty_chunk(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - row_key = b'row-key' - partial_row_data = self._makeOne(row_key) - self.assertFalse(partial_row_data._chunks_encountered) - - chunk = messages_v1_pb2.ReadRowsResponse.Chunk() - read_rows_response_pb = messages_v1_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk]) - - # This makes it an "empty" chunk. 
- self.assertEqual(chunk.WhichOneof('chunk'), None) - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(read_rows_response_pb) - - self.assertFalse(partial_row_data._chunks_encountered) - class TestPartialRowsData(unittest2.TestCase): @@ -444,6 +239,16 @@ def test___ne__(self): partial_rows_data2 = self._makeOne(response_iterator2) self.assertNotEqual(partial_rows_data1, partial_rows_data2) + def test_state_start(self): + prd = self._makeOne([]) + self.assertEqual(prd.state, prd.START) + + def test_state_new_row_w_row(self): + prd = self._makeOne([]) + prd._last_scanned_row_key = '' + prd._row = object() + self.assertEqual(prd.state, prd.NEW_ROW) + def test_rows_getter(self): partial_rows_data = self._makeOne(None) partial_rows_data._rows = value = object() @@ -456,43 +261,7 @@ def test_cancel(self): partial_rows_data.cancel() self.assertEqual(response_iterator.cancel_calls, 1) - def test_consume_next(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable.row_data import PartialRowData - - row_key = b'row-key' - value_pb = messages_v1_pb2.ReadRowsResponse(row_key=row_key) - response_iterator = _MockCancellableIterator(value_pb) - partial_rows_data = self._makeOne(response_iterator) - self.assertEqual(partial_rows_data.rows, {}) - partial_rows_data.consume_next() - expected_rows = {row_key: PartialRowData(row_key)} - self.assertEqual(partial_rows_data.rows, expected_rows) - - def test_consume_next_row_exists(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable.row_data import PartialRowData - - row_key = b'row-key' - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) - value_pb = messages_v1_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk]) - response_iterator = _MockCancellableIterator(value_pb) - partial_rows_data = self._makeOne(response_iterator) - existing_values = PartialRowData(row_key) - partial_rows_data._rows[row_key] = existing_values - self.assertFalse(existing_values.committed) - partial_rows_data.consume_next() - self.assertTrue(existing_values.committed) - self.assertEqual(existing_values.cells, {}) - - def test_consume_next_empty_iter(self): - response_iterator = _MockCancellableIterator() - partial_rows_data = self._makeOne(response_iterator) - with self.assertRaises(StopIteration): - partial_rows_data.consume_next() + # 'consume_nest' tested via 'TestPartialRowsData_JSON_acceptance_tests' def test_consume_all(self): klass = self._getDoNothingClass() @@ -518,41 +287,6 @@ def test_consume_all_with_max_loops(self): self.assertEqual( list(response_iterator.iter_values), [value2, value3]) - -class TestPartialRowsDataV2(unittest2.TestCase): - - _json_tests = None - - def _getTargetClass(self): - from gcloud.bigtable.row_data import PartialRowsDataV2 - return PartialRowsDataV2 - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def _load_json_test(self, test_name): - import os - if self.__class__._json_tests is None: - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, 'read-rows-acceptance-test.json') - raw = _parse_readrows_acceptance_tests(filename) - tests = self.__class__._json_tests = {} - for (name, chunks, results) in raw: - tests[name] = chunks, results - return self.__class__._json_tests[test_name] - - # Not part of the JSON acceptance tests. 
- - def test_state_start(self): - prd = self._makeOne([]) - self.assertEqual(prd.state, prd.START) - - def test_state_new_row_w_row(self): - prd = self._makeOne([]) - prd._last_scanned_row_key = '' - prd._row = object() - self.assertEqual(prd.state, prd.NEW_ROW) - def test__copy_from_current_unset(self): prd = self._makeOne([]) chunks = _generate_cell_chunks(['']) @@ -571,7 +305,7 @@ def test__copy_from_current_blank(self): TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] prd = self._makeOne([]) - prd._cell = _PartialCellV2() + prd._cell = _PartialCellData() chunks = _generate_cell_chunks(['']) chunk = chunks[0] chunk.row_key = ROW_KEY @@ -588,7 +322,7 @@ def test__copy_from_current_blank(self): def test__copy_from_previous_unset(self): prd = self._makeOne([]) - cell = _PartialCellV2() + cell = _PartialCellData() prd._copy_from_previous(cell) self.assertEqual(cell.row_key, '') self.assertEqual(cell.family_name, u'') @@ -603,14 +337,14 @@ def test__copy_from_previous_blank(self): TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] prd = self._makeOne([]) - cell = _PartialCellV2( + cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, qualifier=QUALIFIER, timestamp_micros=TIMESTAMP_MICROS, labels=LABELS, ) - prd._previous_cell = _PartialCellV2() + prd._previous_cell = _PartialCellData() prd._copy_from_previous(cell) self.assertEqual(cell.row_key, ROW_KEY) self.assertEqual(cell.family_name, FAMILY_NAME) @@ -625,14 +359,14 @@ def test__copy_from_previous_filled(self): TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] prd = self._makeOne([]) - prd._previous_cell = _PartialCellV2( + prd._previous_cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, qualifier=QUALIFIER, timestamp_micros=TIMESTAMP_MICROS, labels=LABELS, ) - cell = _PartialCellV2() + cell = _PartialCellData() prd._copy_from_previous(cell) self.assertEqual(cell.row_key, ROW_KEY) self.assertEqual(cell.family_name, FAMILY_NAME) @@ -687,6 +421,29 @@ def test_invalid_empty_second_chunk(self): with self.assertRaises(InvalidChunk): prd.consume_next() + +class TestPartialRowsData_JSON_acceptance_tests(unittest2.TestCase): + + _json_tests = None + + def _getTargetClass(self): + from gcloud.bigtable.row_data import PartialRowsData + return PartialRowsData + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _load_json_test(self, test_name): + import os + if self.__class__._json_tests is None: + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, 'read-rows-acceptance-test.json') + raw = _parse_readrows_acceptance_tests(filename) + tests = self.__class__._json_tests = {} + for (name, chunks, results) in raw: + tests[name] = chunks, results + return self.__class__._json_tests[test_name] + # JSON Error cases: invalid chunks def _fail_during_consume(self, testcase_name): @@ -910,6 +667,9 @@ def cancel(self): def next(self): return next(self.iter_values) + def __next__(self): # pragma: NO COVER Py3k + return self.next() + class _Dummy(object): @@ -917,7 +677,7 @@ def __init__(self, **kw): self.__dict__.update(kw) -class _PartialCellV2(object): +class _PartialCellData(object): row_key = '' family_name = u'' diff --git a/gcloud/bigtable/test_row_filters.py b/gcloud/bigtable/test_row_filters.py index 1ea7ecb2fc62..594a4fe47c2b 100644 --- a/gcloud/bigtable/test_row_filters.py +++ b/gcloud/bigtable/test_row_filters.py @@ -420,7 +420,7 @@ def test_to_pb_inclusive_start(self): row_filter = self._makeOne(column_family_id, start_column=column) col_range_pb = _ColumnRangePB( 
family_name=column_family_id, - start_qualifier_inclusive=column, + start_qualifier_closed=column, ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -432,7 +432,7 @@ def test_to_pb_exclusive_start(self): inclusive_start=False) col_range_pb = _ColumnRangePB( family_name=column_family_id, - start_qualifier_exclusive=column, + start_qualifier_open=column, ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -443,7 +443,7 @@ def test_to_pb_inclusive_end(self): row_filter = self._makeOne(column_family_id, end_column=column) col_range_pb = _ColumnRangePB( family_name=column_family_id, - end_qualifier_inclusive=column, + end_qualifier_closed=column, ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -455,7 +455,7 @@ def test_to_pb_exclusive_end(self): inclusive_end=False) col_range_pb = _ColumnRangePB( family_name=column_family_id, - end_qualifier_exclusive=column, + end_qualifier_open=column, ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -543,28 +543,28 @@ def test_to_pb(self): def test_to_pb_inclusive_start(self): value = b'some-value' row_filter = self._makeOne(start_value=value) - val_range_pb = _ValueRangePB(start_value_inclusive=value) + val_range_pb = _ValueRangePB(start_value_closed=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): value = b'some-value' row_filter = self._makeOne(start_value=value, inclusive_start=False) - val_range_pb = _ValueRangePB(start_value_exclusive=value) + val_range_pb = _ValueRangePB(start_value_open=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): value = b'some-value' row_filter = self._makeOne(end_value=value) - val_range_pb = _ValueRangePB(end_value_inclusive=value) + val_range_pb = _ValueRangePB(end_value_closed=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): value = b'some-value' row_filter = self._makeOne(end_value=value, inclusive_end=False) - val_range_pb = _ValueRangePB(end_value_exclusive=value) + val_range_pb = _ValueRangePB(end_value_open=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -960,42 +960,42 @@ def test_to_pb_false_only(self): def _ColumnRangePB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.ColumnRange(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.RowFilter(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.RowFilter.Chain(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): - from 
gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.RowFilter.Condition(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.RowFilter.Interleave(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.TimestampRange(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - return data_v1_pb2.ValueRange(*args, **kw) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ValueRange(*args, **kw) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 0f015777aadf..a6339329dfca 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -18,6 +18,12 @@ class TestTable(unittest2.TestCase): + ROW_KEY = b'row-key' + FAMILY_NAME = u'family' + QUALIFIER = b'qualifier' + TIMESTAMP_MICROS = 100 + VALUE = b'value' + def _getTargetClass(self): from gcloud.bigtable.table import Table return Table @@ -125,10 +131,7 @@ def test___ne__(self): self.assertNotEqual(table1, table2) def _create_test_helper(self, initial_split_keys): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) + from gcloud._helpers import _to_bytes from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -144,14 +147,17 @@ def _create_test_helper(self, initial_split_keys): table = self._makeOne(table_id, cluster) # Create request_pb - request_pb = messages_v1_pb2.CreateTableRequest( - initial_split_keys=initial_split_keys, + splits_pb = [ + _CreateTableRequestSplitPB(key=_to_bytes(key)) + for key in initial_split_keys or ()] + request_pb = _CreateTableRequestPB( + initial_splits=splits_pb, name=cluster_name, table_id=table_id, ) # Create response_pb - response_pb = data_v1_pb2.Table() + response_pb = _TablePB() # Patch the stub used by the API method. 
client._table_stub = stub = _FakeStub(response_pb) @@ -173,14 +179,10 @@ def test_create(self): self._create_test_helper(initial_split_keys) def test_create_with_split_keys(self): - initial_split_keys = ['s1', 's2'] + initial_split_keys = [b's1', b's2'] self._create_test_helper(initial_split_keys) - def _list_column_families_helper(self, column_family_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) + def _list_column_families_helper(self): from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -197,15 +199,12 @@ def _list_column_families_helper(self, column_family_name=None): # Create request_pb table_name = cluster_name + '/tables/' + table_id - request_pb = messages_v1_pb2.GetTableRequest(name=table_name) + request_pb = _GetTableRequestPB(name=table_name) # Create response_pb column_family_id = 'foo' - if column_family_name is None: - column_family_name = (table_name + '/columnFamilies/' + - column_family_id) - column_family = data_v1_pb2.ColumnFamily(name=column_family_name) - response_pb = data_v1_pb2.Table( + column_family = _ColumnFamilyPB() + response_pb = _TablePB( column_families={column_family_id: column_family}, ) @@ -229,16 +228,8 @@ def _list_column_families_helper(self, column_family_name=None): def test_list_column_families(self): self._list_column_families_helper() - def test_list_column_families_failure(self): - column_family_name = 'not-the-right-format' - with self.assertRaises(ValueError): - self._list_column_families_helper( - column_family_name=column_family_name) - def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -255,7 +246,7 @@ def test_delete(self): # Create request_pb table_name = cluster_name + '/tables/' + table_id - request_pb = messages_v1_pb2.DeleteTableRequest(name=table_name) + request_pb = _DeleteTableRequestPB(name=table_name) # Create response_pb response_pb = empty_pb2.Empty() @@ -275,12 +266,9 @@ def test_delete(self): {}, )]) - def _read_row_helper(self, chunks): + def _read_row_helper(self, chunks, expected_result): from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable.row_data import PartialRowData from gcloud.bigtable import table as MUT project_id = 'project-id' @@ -303,26 +291,16 @@ def mock_create_row_request(table_name, row_key, filter_): return request_pb # Create response_iterator - row_key = b'row-key' - response_pb = messages_v1_pb2.ReadRowsResponse( - row_key=row_key, chunks=chunks) - response_iterator = [response_pb] + response_pb = _ReadRowsResponsePB(chunks=chunks) + response_iterator = iter([response_pb]) # Patch the stub used by the API method. client._data_stub = stub = _FakeStub(response_iterator) - # Create expected_result. - if chunks: - expected_result = PartialRowData(row_key) - expected_result._committed = True - expected_result._chunks_encountered = True - else: - expected_result = None - # Perform the method and check the result. 
filter_obj = object() with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_row(row_key, filter_=filter_obj) + result = table.read_row(self.ROW_KEY, filter_=filter_obj) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( @@ -330,29 +308,44 @@ def mock_create_row_request(table_name, row_key, filter_): (request_pb, timeout_seconds), {}, )]) - self.assertEqual(mock_created, [(table.name, row_key, filter_obj)]) + self.assertEqual(mock_created, + [(table.name, self.ROW_KEY, filter_obj)]) + + def test_read_empty_row(self): + chunks = [] + self._read_row_helper(chunks, None) def test_read_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) + from gcloud.bigtable.row_data import Cell + from gcloud.bigtable.row_data import PartialRowData - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(commit_row=True) + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) chunks = [chunk] - self._read_row_helper(chunks) - - def test_read_empty_row(self): - chunks = [] - self._read_row_helper(chunks) + expected_result = PartialRowData(row_key=self.ROW_KEY) + family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) + column = family.setdefault(self.QUALIFIER, []) + column.append(Cell.from_pb(chunk)) + self._read_row_helper(chunks, expected_result) def test_read_row_still_partial(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - # There is never a "commit row". - chunk = messages_v1_pb2.ReadRowsResponse.Chunk(reset_row=True) + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + ) + # No "commit row". 
chunks = [chunk] with self.assertRaises(ValueError): - self._read_row_helper(chunks) + self._read_row_helper(chunks, None) def test_read_rows(self): from gcloud._testing import _Monkey @@ -392,12 +385,11 @@ def mock_create_row_request(table_name, **kwargs): start_key = b'start-key' end_key = b'end-key' filter_obj = object() - allow_row_interleaving = True limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, - allow_row_interleaving=allow_row_interleaving, limit=limit) + limit=limit) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( @@ -409,14 +401,11 @@ def mock_create_row_request(table_name, **kwargs): 'start_key': start_key, 'end_key': end_key, 'filter_': filter_obj, - 'allow_row_interleaving': allow_row_interleaving, 'limit': limit, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_sample_row_keys(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -433,7 +422,7 @@ def test_sample_row_keys(self): # Create request_pb table_name = cluster_name + '/tables/' + table_id - request_pb = messages_v1_pb2.SampleRowKeysRequest( + request_pb = _SampleRowKeysRequestPB( table_name=table_name) # Create response_iterator @@ -458,20 +447,16 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest2.TestCase): def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, allow_row_interleaving=None, limit=None): + filter_=None, limit=None): from gcloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, allow_row_interleaving=allow_row_interleaving, - limit=limit) + filter_=filter_, limit=limit) def test_table_name_only(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - table_name = 'table_name' result = self._callFUT(table_name) - expected_result = messages_v1_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name) self.assertEqual(result, expected_result) @@ -480,108 +465,129 @@ def test_row_key_row_range_conflict(self): self._callFUT(None, row_key=object(), end_key=object()) def test_row_key(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - table_name = 'table_name' row_key = b'row_key' result = self._callFUT(table_name, row_key=row_key) - expected_result = messages_v1_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name, - row_key=row_key, ) + expected_result.rows.row_keys.append(row_key) self.assertEqual(result, expected_result) def test_row_range_start_key(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - table_name = 'table_name' start_key = b'start_key' result = self._callFUT(table_name, start_key=start_key) - expected_result = messages_v1_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_v1_pb2.RowRange(start_key=start_key), - ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) def test_row_range_end_key(self): - from gcloud.bigtable._generated 
import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - table_name = 'table_name' end_key = b'end_key' result = self._callFUT(table_name, end_key=end_key) - expected_result = messages_v1_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_v1_pb2.RowRange(end_key=end_key), - ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(end_key_open=end_key) self.assertEqual(result, expected_result) def test_row_range_both_keys(self): - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - table_name = 'table_name' start_key = b'start_key' end_key = b'end_key' result = self._callFUT(table_name, start_key=start_key, end_key=end_key) - expected_result = messages_v1_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_v1_pb2.RowRange( - start_key=start_key, end_key=end_key), - ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add( + start_key_closed=start_key, end_key_open=end_key) self.assertEqual(result, expected_result) def test_with_filter(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.row_filters import RowSampleFilter - table_name = 'table_name' row_filter = RowSampleFilter(0.33) result = self._callFUT(table_name, filter_=row_filter) - expected_result = messages_v1_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name, filter=row_filter.to_pb(), ) self.assertEqual(result, expected_result) - def test_with_allow_row_interleaving(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - - table_name = 'table_name' - allow_row_interleaving = True - result = self._callFUT(table_name, - allow_row_interleaving=allow_row_interleaving) - expected_result = messages_v1_pb2.ReadRowsRequest( - table_name=table_name, - allow_row_interleaving=allow_row_interleaving, - ) - self.assertEqual(result, expected_result) - def test_with_limit(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_v1_pb2) - table_name = 'table_name' limit = 1337 result = self._callFUT(table_name, limit=limit) - expected_result = messages_v1_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name, - num_rows_limit=limit, + rows_limit=limit, ) self.assertEqual(result, expected_result) +def _CreateTableRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest(*args, **kw) + + +def _CreateTableRequestSplitPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw) + + +def _DeleteTableRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.DeleteTableRequest(*args, **kw) + + +def _GetTableRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.GetTableRequest(*args, **kw) + + +def _ReadRowsRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return 
messages_v2_pb2.ReadRowsRequest(*args, **kw) + + +def _ReadRowsResponseCellChunkPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + family_name = kw.pop('family_name') + qualifier = kw.pop('qualifier') + message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) + message.family_name.value = family_name + message.qualifier.value = qualifier + return message + + +def _ReadRowsResponsePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsResponse(*args, **kw) + + +def _SampleRowKeysRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) + + +def _TablePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.Table(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) + + class _Client(object): data_stub = None From 4ed0b8f145c76038d7d670023e8f2189b75681f1 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 17:33:29 -0400 Subject: [PATCH 033/103] Drop 'Cluster.table' and 'Cluster.list_tables'. In V2, those operations are on the instance. --- gcloud/bigtable/cluster.py | 38 ---------------- gcloud/bigtable/test_cluster.py | 79 --------------------------------- 2 files changed, 117 deletions(-) diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 28875730c292..85bd6966ad03 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -24,9 +24,6 @@ bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._generated import ( bigtable_cluster_service_messages_pb2 as messages_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_v1_pb2) -from gcloud.bigtable.table import Table _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' @@ -230,17 +227,6 @@ def __init__(self, zone, cluster_id, client, self.serve_nodes = serve_nodes self._client = client - def table(self, table_id): - """Factory to create a table associated with this cluster. - - :type table_id: str - :param table_id: The ID of the table. - - :rtype: :class:`Table ` - :returns: The table owned by this cluster. - """ - return Table(table_id, self) - def _update_from_pb(self, cluster_pb): """Refresh self from the server-provided protobuf. @@ -464,27 +450,3 @@ def undelete(self): op_id, op_begin = _process_operation(operation_pb2) return Operation('undelete', op_id, op_begin, cluster=self) - - def list_tables(self): - """List the tables in this cluster. - - :rtype: list of :class:`Table ` - :returns: The list of tables owned by the cluster. - :raises: :class:`ValueError ` if one of the - returned tables has a name that is not of the expected format. 
- """ - request_pb = table_messages_v1_pb2.ListTablesRequest(name=self.name) - # We expect a `table_messages_v1_pb2.ListTablesResponse` - table_list_pb = self._client._table_stub.ListTables( - request_pb, self._client.timeout_seconds) - - result = [] - for table_pb in table_list_pb.tables: - table_prefix = self.name + '/tables/' - if not table_pb.name.startswith(table_prefix): - raise ValueError('Table name %s not of expected format' % ( - table_pb.name,)) - table_id = table_pb.name[len(table_prefix):] - result.append(self.table(table_id)) - - return result diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index 294f9a0d0f55..d884ae8783dc 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -194,19 +194,6 @@ def test_copy(self): self.assertFalse(cluster is new_cluster) self.assertEqual(cluster, new_cluster) - def test_table_factory(self): - from gcloud.bigtable.table import Table - - zone = 'zone' - cluster_id = 'cluster-id' - cluster = self._makeOne(zone, cluster_id, None) - - table_id = 'table_id' - table = cluster.table(table_id) - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.table_id, table_id) - self.assertEqual(table._cluster, cluster) - def test__update_from_pb_success(self): from gcloud.bigtable._generated import ( bigtable_cluster_data_pb2 as data_v1_pb2) @@ -612,72 +599,6 @@ def mock_process_operation(operation_pb): )]) self.assertEqual(process_operation_called, [response_pb]) - def _list_tables_helper(self, table_id, table_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as table_data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 45 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_ - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = table_messages_v1_pb2.ListTablesRequest( - name=cluster_name) - - # Create response_pb - table_name = table_name or (cluster_name + '/tables/' + table_id) - response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[ - table_data_pb2.Table(name=table_name), - ], - ) - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_table = cluster.table(table_id) - expected_result = [expected_table] - - # Perform the method and check the result. 
- result = cluster.list_tables() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListTables', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_tables(self): - table_id = 'table_id' - self._list_tables_helper(table_id) - - def test_list_tables_failure_bad_split(self): - with self.assertRaises(ValueError): - self._list_tables_helper(None, table_name='wrong-format') - - def test_list_tables_failure_name_bad_before(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - - table_id = 'table_id' - bad_table_name = ('nonempty-section-before' + - 'projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - with self.assertRaises(ValueError): - self._list_tables_helper(table_id, table_name=bad_table_name) - class Test__prepare_create_request(unittest2.TestCase): From 7cbb215ef93de8d59be6201521cd4f906373e7ac Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 17:34:28 -0400 Subject: [PATCH 034/103] In V2, tables hold a ref. to their instance, not a cluster. --- gcloud/bigtable/table.py | 26 +++--- gcloud/bigtable/test_table.py | 146 ++++++++++++++-------------------- 2 files changed, 73 insertions(+), 99 deletions(-) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 83182d9f2a04..159fc4566c42 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -52,13 +52,13 @@ class Table(object): :type table_id: str :param table_id: The ID of the table. - :type cluster: :class:`Cluster <.cluster.Cluster>` - :param cluster: The cluster that owns the table. + :type instance: :class:`Cluster <.instance.Instance>` + :param instance: The instance that owns the table. """ - def __init__(self, table_id, cluster): + def __init__(self, table_id, instance): self.table_id = table_id - self._cluster = cluster + self._instance = instance @property def name(self): @@ -76,7 +76,7 @@ def name(self): :rtype: str :returns: The table name. """ - return self._cluster.name + '/tables/' + self.table_id + return self._instance.name + '/tables/' + self.table_id def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. 
@@ -131,7 +131,7 @@ def __eq__(self, other): if not isinstance(other, self.__class__): return False return (other.table_id == self.table_id and - other._cluster == self._cluster) + other._instance == self._instance) def __ne__(self, other): return not self.__eq__(other) @@ -170,10 +170,10 @@ def create(self, initial_split_keys=None): split_pb(key=key) for key in initial_split_keys] request_pb = table_admin_messages_v2_pb2.CreateTableRequest( initial_splits=initial_split_keys or [], - name=self._cluster.name, + name=self._instance.name, table_id=self.table_id, ) - client = self._cluster._client + client = self._instance._client # We expect a `._generated.bigtable_table_data_pb2.Table` client._table_stub.CreateTable(request_pb, client.timeout_seconds) @@ -181,7 +181,7 @@ def delete(self): """Delete this table.""" request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( name=self.name) - client = self._cluster._client + client = self._instance._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.DeleteTable(request_pb, client.timeout_seconds) @@ -198,7 +198,7 @@ def list_column_families(self): """ request_pb = table_admin_messages_v2_pb2.GetTableRequest( name=self.name) - client = self._cluster._client + client = self._instance._client # We expect a `._generated.bigtable_table_data_pb2.Table` table_pb = client._table_stub.GetTable(request_pb, client.timeout_seconds) @@ -229,7 +229,7 @@ def read_row(self, row_key, filter_=None): """ request_pb = _create_row_request(self.name, row_key=row_key, filter_=filter_) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) rows_data = PartialRowsData(response_iterator) @@ -273,7 +273,7 @@ def read_rows(self, start_key=None, end_key=None, limit=None, request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` @@ -312,7 +312,7 @@ def sample_row_keys(self): """ request_pb = data_messages_v2_pb2.SampleRowKeysRequest( table_name=self.name) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.SampleRowKeys( request_pb, client.timeout_seconds) return response_iterator diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index a6339329dfca..1eb2d81d3b10 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -18,6 +18,12 @@ class TestTable(unittest2.TestCase): + PROJECT_ID = 'project-id' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) + TABLE_ID = 'table-id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + TIMEOUT_SECONDS = 1333 ROW_KEY = b'row-key' FAMILY_NAME = u'family' QUALIFIER = b'qualifier' @@ -33,19 +39,19 @@ def _makeOne(self, *args, **kwargs): def test_constructor(self): table_id = 'table-id' - cluster = object() + instance = object() - table = self._makeOne(table_id, cluster) + table = self._makeOne(table_id, instance) self.assertEqual(table.table_id, table_id) - self.assertTrue(table._cluster is cluster) + self.assertTrue(table._instance is instance) def test_name_property(self): table_id = 'table-id' - cluster_name = 'cluster_name' + instance_name = 'instance_name' - cluster = _Cluster(cluster_name) - 
table = self._makeOne(table_id, cluster) - expected_name = cluster_name + '/tables/' + table_id + instance = _Instance(instance_name) + table = self._makeOne(table_id, instance) + expected_name = instance_name + '/tables/' + table_id self.assertEqual(table.name, expected_name) def test_column_family_factory(self): @@ -100,51 +106,40 @@ def test_row_factory_append(self): self.assertEqual(row._table, table) def test_row_factory_failure(self): - table_id = 'table-id' - table = self._makeOne(table_id, None) + table = self._makeOne(self.TABLE_ID, None) with self.assertRaises(ValueError): table.row(b'row_key', filter_=object(), append=True) def test___eq__(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): - table1 = self._makeOne('table_id', None) + table1 = self._makeOne(self.TABLE_ID, None) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) comparison_val = (table1 != table2) self.assertFalse(comparison_val) def test___ne__(self): - table1 = self._makeOne('table_id1', 'cluster1') - table2 = self._makeOne('table_id2', 'cluster2') + table1 = self._makeOne('table_id1', 'instance1') + table2 = self._makeOne('table_id2', 'instance2') self.assertNotEqual(table1, table2) def _create_test_helper(self, initial_split_keys): from gcloud._helpers import _to_bytes from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 150 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb splits_pb = [ @@ -152,8 +147,8 @@ def _create_test_helper(self, initial_split_keys): for key in initial_split_keys or ()] request_pb = _CreateTableRequestPB( initial_splits=splits_pb, - name=cluster_name, - table_id=table_id, + name=self.INSTANCE_NAME, + table_id=self.TABLE_ID, ) # Create response_pb @@ -170,7 +165,7 @@ def _create_test_helper(self, initial_split_keys): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'CreateTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -185,27 +180,18 @@ def test_create_with_split_keys(self): def _list_column_families_helper(self): from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 502 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = 
_Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = _GetTableRequestPB(name=table_name) + request_pb = _GetTableRequestPB(name=self.TABLE_NAME) # Create response_pb - column_family_id = 'foo' + COLUMN_FAMILY_ID = 'foo' column_family = _ColumnFamilyPB() response_pb = _TablePB( - column_families={column_family_id: column_family}, + column_families={COLUMN_FAMILY_ID: column_family}, ) # Patch the stub used by the API method. @@ -213,7 +199,7 @@ def _list_column_families_helper(self): # Create expected_result. expected_result = { - column_family_id: table.column_family(column_family_id), + COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID), } # Perform the method and check the result. @@ -221,7 +207,7 @@ def _list_column_families_helper(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'GetTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -232,21 +218,12 @@ def test_delete(self): from google.protobuf import empty_pb2 from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 871 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = _DeleteTableRequestPB(name=table_name) + request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME) # Create response_pb response_pb = empty_pb2.Empty() @@ -262,7 +239,7 @@ def test_delete(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'DeleteTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -272,15 +249,14 @@ def _read_row_helper(self, chunks, expected_result): from gcloud.bigtable import table as MUT project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' + instance_id = 'instance-id' table_id = 'table-id' timeout_seconds = 596 client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + instance_name = ('projects/' + project_id + + '/instances/' + instance_id) + instance = _Instance(instance_name, client=client) + table = self._makeOne(table_id, instance) # Create request_pb request_pb = object() # Returned by our mock. 
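# A sketch, not from the original patch: how the V2 read path exercised by
# these tests fits together.  A single ``CellChunk`` with ``commit_row`` set
# yields one complete row once the response is consumed through
# ``PartialRowsData``; the ``*PB`` names are the module-level test factories
# defined later in this file's diff.
#
#     from gcloud.bigtable.row_data import PartialRowsData
#
#     chunk = _ReadRowsResponseCellChunkPB(
#         row_key=b'row-key', family_name=u'family', qualifier=b'qualifier',
#         timestamp_micros=100, value=b'value', commit_row=True)
#     response = _ReadRowsResponsePB(chunks=[chunk])
#     prd = PartialRowsData(iter([response]))
#     prd.consume_all()
#     # prd.rows now maps b'row-key' to a PartialRowData holding one cell
#     # under the u'family' / b'qualifier' column.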
@@ -354,15 +330,14 @@ def test_read_rows(self): from gcloud.bigtable import table as MUT project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' + instance_id = 'instance-id' table_id = 'table-id' timeout_seconds = 1111 client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + instance_name = ('projects/' + project_id + + '/instances/' + instance_id) + instance = _Instance(instance_name, client=client) + table = self._makeOne(table_id, instance) # Create request_pb request_pb = object() # Returned by our mock. @@ -409,19 +384,18 @@ def test_sample_row_keys(self): from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' + instance_id = 'instance-id' table_id = 'table-id' timeout_seconds = 1333 client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + instance_name = ('projects/' + project_id + + '/instances/' + instance_id) + instance = _Instance(instance_name, client=client) + table = self._makeOne(table_id, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id + table_name = instance_name + '/tables/' + table_id request_pb = _SampleRowKeysRequestPB( table_name=table_name) @@ -591,7 +565,7 @@ def _ColumnFamilyPB(*args, **kw): class _Client(object): data_stub = None - cluster_stub = None + instance_stub = None operations_stub = None table_stub = None @@ -599,7 +573,7 @@ def __init__(self, timeout_seconds=None): self.timeout_seconds = timeout_seconds -class _Cluster(object): +class _Instance(object): def __init__(self, name, client=None): self.name = name From 498271ea74497f3504ec143f0161c1e6805c0df7 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 17:34:58 -0400 Subject: [PATCH 035/103] Add V2 Instance type. --- gcloud/bigtable/instance.py | 422 +++++++++++++++++++ gcloud/bigtable/test_instance.py | 697 +++++++++++++++++++++++++++++++ 2 files changed, 1119 insertions(+) create mode 100644 gcloud/bigtable/instance.py create mode 100644 gcloud/bigtable/test_instance.py diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py new file mode 100644 index 000000000000..cb5b03c3f266 --- /dev/null +++ b/gcloud/bigtable/instance.py @@ -0,0 +1,422 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""User friendly container for Google Cloud Bigtable Instance.""" + + +import re + +from google.longrunning import operations_pb2 + +from gcloud._helpers import _pb_timestamp_to_datetime +from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_messages_v1_pb2) +from gcloud.bigtable.table import Table + + +_INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' + r'instances/(?P[a-z][-a-z0-9]*)$') +_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' + r'instances/([a-z][-a-z0-9]*)/operations/' + r'(?P\d+)$') +_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' +_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.' +_INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata' +_TYPE_URL_MAP = { + _INSTANCE_CREATE_METADATA: messages_v1_pb2.CreateInstanceMetadata, +} + + +def _prepare_create_request(instance): + """Creates a protobuf request for a CreateInstance request. + + :type instance: :class:`Instance` + :param instance: The instance to be created. + + :rtype: :class:`.messages_v1_pb2.CreateInstanceRequest` + :returns: The CreateInstance request object containing the instance info. + """ + parent_name = ('projects/' + instance._client.project) + return messages_v1_pb2.CreateInstanceRequest( + name=parent_name, + instance_id=instance.instance_id, + instance=data_v2_pb2.Instance( + display_name=instance.display_name, + ), + ) + + +def _parse_pb_any_to_native(any_val, expected_type=None): + """Convert a serialized "google.protobuf.Any" value to actual type. + + :type any_val: :class:`google.protobuf.any_pb2.Any` + :param any_val: A serialized protobuf value container. + + :type expected_type: str + :param expected_type: (Optional) The type URL we expect ``any_val`` + to have. + + :rtype: object + :returns: The de-serialized object. + :raises: :class:`ValueError ` if the + ``expected_type`` does not match the ``type_url`` on the input. + """ + if expected_type is not None and expected_type != any_val.type_url: + raise ValueError('Expected type: %s, Received: %s' % ( + expected_type, any_val.type_url)) + container_class = _TYPE_URL_MAP[any_val.type_url] + return container_class.FromString(any_val.value) + + +def _process_operation(operation_pb): + """Processes a create protobuf response. + + :type operation_pb: :class:`google.longrunning.operations_pb2.Operation` + :param operation_pb: The long-running operation response from a + Create/Update/Undelete instance request. + + :rtype: tuple + :returns: A pair of an integer and datetime stamp. The integer is the ID + of the operation (``operation_id``) and the timestamp when + the create operation began (``operation_begin``). + :raises: :class:`ValueError ` if the operation name + doesn't match the :data:`_OPERATION_NAME_RE` regex. + """ + match = _OPERATION_NAME_RE.match(operation_pb.name) + if match is None: + raise ValueError('Operation name was not in the expected ' + 'format after a instance modification.', + operation_pb.name) + operation_id = int(match.group('operation_id')) + + request_metadata = _parse_pb_any_to_native(operation_pb.metadata) + operation_begin = _pb_timestamp_to_datetime( + request_metadata.request_time) + + return operation_id, operation_begin + + +class Operation(object): + """Representation of a Google API Long-Running Operation. 
+ + In particular, these will be the result of operations on + instances using the Cloud Bigtable API. + + :type op_type: str + :param op_type: The type of operation being performed. Expect + ``create``, ``update`` or ``undelete``. + + :type op_id: int + :param op_id: The ID of the operation. + + :type begin: :class:`datetime.datetime` + :param begin: The time when the operation was started. + + :type instance: :class:`Instance` + :param instance: The instance that created the operation. + """ + + def __init__(self, op_type, op_id, begin, instance=None): + self.op_type = op_type + self.op_id = op_id + self.begin = begin + self._instance = instance + self._complete = False + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.op_type == self.op_type and + other.op_id == self.op_id and + other.begin == self.begin and + other._instance == self._instance and + other._complete == self._complete) + + def __ne__(self, other): + return not self.__eq__(other) + + def finished(self): + """Check if the operation has finished. + + :rtype: bool + :returns: A boolean indicating if the current operation has completed. + :raises: :class:`ValueError ` if the operation + has already completed. + """ + if self._complete: + raise ValueError('The operation has completed.') + + operation_name = ('operations/' + self._instance.name + + '/operations/%d' % (self.op_id,)) + request_pb = operations_pb2.GetOperationRequest(name=operation_name) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._instance._client._operations_stub.GetOperation( + request_pb, self._instance._client.timeout_seconds) + + if operation_pb.done: + self._complete = True + return True + else: + return False + + +class Instance(object): + """Representation of a Google Cloud Bigtable Instance. + + We can use a :class:`Instance` to: + + * :meth:`reload` itself + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + * :meth:`undelete` itself + + .. note:: + + For now, we leave out the ``default_storage_type`` (an enum) + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. + + :type instance_id: str + :param instance_id: The ID of the instance. + + :type client: :class:`Client ` + :param client: The client that owns the instance. Provides + authorization and a project ID. + + :type display_name: str + :param display_name: (Optional) The display name for the instance in the + Cloud Console UI. (Must be between 4 and 30 + characters.) If this value is not set in the + constructor, will fall back to the instance ID. + """ + + def __init__(self, instance_id, client, + display_name=None): + self.instance_id = instance_id + self.display_name = display_name or instance_id + self._client = client + + def table(self, table_id): + """Factory to create a table associated with this instance. + + :type table_id: str + :param table_id: The ID of the table. + + :rtype: :class:`Table ` + :returns: The table owned by this instance. + """ + return Table(table_id, self) + + def _update_from_pb(self, instance_pb): + """Refresh self from the server-provided protobuf. + + Helper for :meth:`from_pb` and :meth:`reload`. + """ + if not instance_pb.display_name: # Simple field (string) + raise ValueError('Instance protobuf does not contain display_name') + self.display_name = instance_pb.display_name + + @classmethod + def from_pb(cls, instance_pb, client): + """Creates a instance instance from a protobuf. 
+ + :type instance_pb: :class:`instance_pb2.Instance` + :param instance_pb: A instance protobuf object. + + :type client: :class:`Client ` + :param client: The client that owns the instance. + + :rtype: :class:`Instance` + :returns: The instance parsed from the protobuf response. + :raises: :class:`ValueError ` if the instance + name does not match + ``projects/{project}/instances/{instance_id}`` + or if the parsed project ID does not match the project ID + on the client. + """ + match = _INSTANCE_NAME_RE.match(instance_pb.name) + if match is None: + raise ValueError('Instance protobuf name was not in the ' + 'expected format.', instance_pb.name) + if match.group('project') != client.project: + raise ValueError('Project ID on instance does not match the ' + 'project ID on the client') + + result = cls(match.group('instance_id'), client) + result._update_from_pb(instance_pb) + return result + + def copy(self): + """Make a copy of this instance. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Instance` + :returns: A copy of the current instance. + """ + new_client = self._client.copy() + return self.__class__(self.instance_id, new_client, + display_name=self.display_name) + + @property + def name(self): + """Instance name used in requests. + + .. note:: + This property will not change if ``instance_id`` does not, + but the return value is not cached. + + The instance name is of the form + + ``"projects/{project}/instances/{instance_id}"`` + + :rtype: str + :returns: The instance name. + """ + return (self._client.project_name + '/instances/' + self.instance_id) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the display_name. Instead, it only compares + # identifying values instance ID and client. This is + # intentional, since the same instance can be in different states + # if not synchronized. Instances with similar instance + # settings but different clients can't be used in the same way. + return (other.instance_id == self.instance_id and + other._client == self._client) + + def __ne__(self, other): + return not self.__eq__(other) + + def reload(self): + """Reload the metadata for this instance.""" + request_pb = messages_v1_pb2.GetInstanceRequest(name=self.name) + # We expect `data_v2_pb2.Instance`. + instance_pb = self._client._instance_stub.GetInstance( + request_pb, self._client.timeout_seconds) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb) + + def create(self): + """Create this instance. + + .. note:: + + Uses the ``project`` and ``instance_id`` on the current + :class:`Instance` in addition to the ``display_name``. + To change them before creating, reset the values via + + .. code:: python + + instance.display_name = 'New display name' + instance.instance_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. 
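+        # NOTE: CreateInstance does not block until the instance is ready;
+        #       the response is a long-running operation protobuf.
+        #       ``_process_operation`` below extracts its ID and begin
+        #       timestamp so callers can poll completion later through
+        #       ``Operation.finished()``.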
+ operation_pb = self._client._instance_stub.CreateInstance( + request_pb, self._client.timeout_seconds) + + op_id, op_begin = _process_operation(operation_pb) + return Operation('create', op_id, op_begin, instance=self) + + def update(self): + """Update this instance. + + .. note:: + + Updates the ``display_name``. To change that value before + updating, reset its values via + + .. code:: python + + instance.display_name = 'New display name' + + before calling :meth:`update`. + """ + request_pb = data_v2_pb2.Instance( + name=self.name, + display_name=self.display_name, + ) + # Ignore the expected `data_v2_pb2.Instance`. + self._client._instance_stub.UpdateInstance( + request_pb, self._client.timeout_seconds) + + def delete(self): + """Delete this instance. + + Marks a instance and all of its tables for permanent deletion in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the instance's reserved resources. + * The instance's ``delete_time`` field will be set 7 days in the future. + + Soon afterward: + + * All tables within the instance will become unavailable. + + Prior to the instance's ``delete_time``: + + * The instance can be recovered with a call to ``UndeleteInstance``. + * All other attempts to modify or delete the instance will be rejected. + + At the instance's ``delete_time``: + + * The instance and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v1_pb2.DeleteInstanceRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._client._instance_stub.DeleteInstance( + request_pb, self._client.timeout_seconds) + + def list_tables(self): + """List the tables in this instance. + + :rtype: list of :class:`Table ` + :returns: The list of tables owned by the instance. + :raises: :class:`ValueError ` if one of the + returned tables has a name that is not of the expected format. + """ + request_pb = table_messages_v1_pb2.ListTablesRequest(name=self.name) + # We expect a `table_messages_v1_pb2.ListTablesResponse` + table_list_pb = self._client._table_stub.ListTables( + request_pb, self._client.timeout_seconds) + + result = [] + for table_pb in table_list_pb.tables: + table_prefix = self.name + '/tables/' + if not table_pb.name.startswith(table_prefix): + raise ValueError('Table name %s not of expected format' % ( + table_pb.name,)) + table_id = table_pb.name[len(table_prefix):] + result.append(self.table(table_id)) + + return result diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py new file mode 100644 index 000000000000..ceb8df151eb7 --- /dev/null +++ b/gcloud/bigtable/test_instance.py @@ -0,0 +1,697 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
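+
+# For orientation, an illustrative (untested) sketch of the flow these unit
+# tests exercise, assuming an authenticated
+# ``gcloud.bigtable.client.Client`` named ``client`` with admin access; all
+# names come from ``gcloud.bigtable.instance``:
+#
+#     from gcloud.bigtable.instance import Instance
+#
+#     instance = Instance('my-instance-id', client,
+#                         display_name='My Instance')
+#     operation = instance.create()       # returns a long-running Operation
+#     while not operation.finished():     # poll (sleep/backoff in real code)
+#         pass
+#     instance.reload()                   # pick up server-side metadata
+#     tables = instance.list_tables()
+#     instance.delete()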
+ + +import datetime +import unittest2 + + +class TestOperation(unittest2.TestCase): + + OP_TYPE = 'fake-op' + OP_ID = 8915 + BEGIN = datetime.datetime(2015, 10, 22, 1, 1) + + def _getTargetClass(self): + from gcloud.bigtable.instance import Operation + return Operation + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _constructor_test_helper(self, instance=None): + operation = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + + self.assertEqual(operation.op_type, self.OP_TYPE) + self.assertEqual(operation.op_id, self.OP_ID) + self.assertEqual(operation.begin, self.BEGIN) + self.assertEqual(operation._instance, instance) + self.assertFalse(operation._complete) + + def test_constructor_defaults(self): + self._constructor_test_helper() + + def test_constructor_explicit_instance(self): + instance = object() + self._constructor_test_helper(instance=instance) + + def test___eq__(self): + instance = object() + operation1 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + operation2 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.assertEqual(operation1, operation2) + + def test___eq__type_differ(self): + operation1 = self._makeOne('foo', 123, None) + operation2 = object() + self.assertNotEqual(operation1, operation2) + + def test___ne__same_value(self): + instance = object() + operation1 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + operation2 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + comparison_val = (operation1 != operation2) + self.assertFalse(comparison_val) + + def test___ne__(self): + operation1 = self._makeOne('foo', 123, None) + operation2 = self._makeOne('bar', 456, None) + self.assertNotEqual(operation1, operation2) + + def test_finished_without_operation(self): + operation = self._makeOne(None, None, None) + operation._complete = True + with self.assertRaises(ValueError): + operation.finished() + + def _finished_helper(self, done): + from google.longrunning import operations_pb2 + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + TIMEOUT_SECONDS = 1 + + client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS) + instance = Instance(INSTANCE_ID, client) + operation = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + + # Create request_pb + op_name = ('operations/projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/operations/%d' % (self.OP_ID,)) + request_pb = operations_pb2.GetOperationRequest(name=op_name) + + # Create response_pb + response_pb = operations_pb2.Operation(done=done) + + # Patch the stub used by the API method. + client._operations_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = done + + # Perform the method and check the result. 
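+        # ``finished()`` should issue exactly one GetOperation call against
+        # the fake stub (which records each call as a
+        # (method name, args, kwargs) tuple) and should only flip
+        # ``_complete`` when the response reports ``done``.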
+ result = operation.finished() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetOperation', + (request_pb, TIMEOUT_SECONDS), + {}, + )]) + + if done: + self.assertTrue(operation._complete) + else: + self.assertFalse(operation._complete) + + def test_finished(self): + self._finished_helper(done=True) + + def test_finished_not_done(self): + self._finished_helper(done=False) + + +class TestInstance(unittest2.TestCase): + + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = ('projects/' + PROJECT + '/instances/' + INSTANCE_ID) + DISPLAY_NAME = 'display_name' + OP_ID = 8915 + OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % + (PROJECT, INSTANCE_ID, OP_ID)) + TABLE_ID = 'table_id' + TIMEOUT_SECONDS = 1 + + def _getTargetClass(self): + from gcloud.bigtable.instance import Instance + return Instance + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + client = object() + + instance = self._makeOne(self.INSTANCE_ID, client) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, self.INSTANCE_ID) + self.assertTrue(instance._client is client) + + def test_constructor_non_default(self): + display_name = 'display_name' + client = object() + + instance = self._makeOne(self.INSTANCE_ID, client, + display_name=display_name) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, display_name) + self.assertTrue(instance._client is client) + + def test_copy(self): + display_name = 'display_name' + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, + display_name=display_name) + new_instance = instance.copy() + + # Make sure the client copy succeeded. + self.assertFalse(new_instance._client is client) + self.assertEqual(new_instance._client, client) + # Make sure the client got copied to a new instance. 
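+        # (``copy()`` should yield a distinct Instance that still compares
+        # equal, since ``Instance.__eq__`` only looks at the instance ID
+        # and the client.)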
+ self.assertFalse(instance is new_instance) + self.assertEqual(instance, new_instance) + + def test_table_factory(self): + from gcloud.bigtable.table import Table + + instance = self._makeOne(self.INSTANCE_ID, None) + + table = instance.table(self.TABLE_ID) + self.assertTrue(isinstance(table, Table)) + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertEqual(table._instance, instance) + + def test__update_from_pb_success(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + display_name = 'display_name' + instance_pb = data_v2_pb2.Instance( + display_name=display_name, + ) + + instance = self._makeOne(None, None, None) + self.assertEqual(instance.display_name, None) + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, display_name) + + def test__update_from_pb_no_display_name(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + instance_pb = data_v2_pb2.Instance() + instance = self._makeOne(None, None, None) + self.assertEqual(instance.display_name, None) + with self.assertRaises(ValueError): + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, None) + + def test_from_pb_success(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + client = _Client(project=self.PROJECT) + + instance_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.INSTANCE_ID, + ) + + klass = self._getTargetClass() + instance = klass.from_pb(instance_pb, client) + self.assertTrue(isinstance(instance, klass)) + self.assertEqual(instance._client, client) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + + def test_from_pb_bad_instance_name(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + instance_name = 'INCORRECT_FORMAT' + instance_pb = data_v2_pb2.Instance(name=instance_name) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, None) + + def test_from_pb_project_mistmatch(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(project=ALT_PROJECT) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) + + instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, client) + + def test_name_property(self): + client = _Client(project=self.PROJECT) + + instance = self._makeOne(self.INSTANCE_ID, client) + self.assertEqual(instance.name, self.INSTANCE_NAME) + + def test___eq__(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client) + instance2 = self._makeOne(self.INSTANCE_ID, client) + self.assertEqual(instance1, instance2) + + def test___eq__type_differ(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client) + instance2 = object() + self.assertNotEqual(instance1, instance2) + + def test___ne__same_value(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client) + instance2 = self._makeOne(self.INSTANCE_ID, client) + comparison_val = (instance1 != instance2) + self.assertFalse(comparison_val) + + def test___ne__(self): + instance1 = self._makeOne('instance_id1', 'client1') + instance2 = self._makeOne('instance_id2', 'client2') + self.assertNotEqual(instance1, instance2) + + def test_reload(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from 
gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_pb + request_pb = messages_v2_pb.GetInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + DISPLAY_NAME = u'hey-hi-hello' + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # reload() has no return value. + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, self.INSTANCE_ID) + + # Perform the method and check the result. + result = instance.reload() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, DISPLAY_NAME) + + def test_create(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable import instance as MUT + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_pb. Just a mock since we monkey patch + # _prepare_create_request + request_pb = object() + + # Create response_pb + op_begin = object() + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = MUT.Operation('create', self.OP_ID, op_begin, + instance=instance) + + # Create the mocks. + prep_create_called = [] + + def mock_prep_create_req(instance): + prep_create_called.append(instance) + return request_pb + + process_operation_called = [] + + def mock_process_operation(operation_pb): + process_operation_called.append(operation_pb) + return self.OP_ID, op_begin + + # Perform the method and check the result. + with _Monkey(MUT, + _prepare_create_request=mock_prep_create_req, + _process_operation=mock_process_operation): + result = instance.create() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CreateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + self.assertEqual(prep_create_called, [instance]) + self.assertEqual(process_operation_called, [response_pb]) + + def test_update(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, + display_name=self.DISPLAY_NAME) + + # Create request_pb + request_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.DISPLAY_NAME, + ) + + # Create response_pb + response_pb = data_v2_pb2.Instance() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None + + # Perform the method and check the result. 
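+        # ``update()`` sends an Instance protobuf built from ``name`` and
+        # ``display_name`` and deliberately ignores the response, so the
+        # return value should be None.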
+ result = instance.update() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'UpdateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_delete(self): + from google.protobuf import empty_pb2 + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_pb + request_pb = messages_v2_pb.DeleteInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. + result = instance.delete() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'DeleteInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def _list_tables_helper(self, table_id, table_name=None): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_messages_v1_pb2) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_ + request_pb = table_messages_v1_pb2.ListTablesRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + if table_name is None: + table_name = self.INSTANCE_NAME + '/tables/' + self.TABLE_ID + + response_pb = table_messages_v1_pb2.ListTablesResponse( + tables=[ + table_data_v2_pb2.Table(name=table_name), + ], + ) + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_table = instance.table(self.TABLE_ID) + expected_result = [expected_table] + + # Perform the method and check the result. 
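+        # ``list_tables()`` recovers each table ID by stripping the
+        # '<instance name>/tables/' prefix from the returned table names,
+        # and raises ValueError when a name does not match; the failure
+        # tests below feed it a bad ``table_name`` to hit that path.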
+ result = instance.list_tables() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListTables', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_list_tables(self): + self._list_tables_helper(self.TABLE_ID) + + def test_list_tables_failure_bad_split(self): + with self.assertRaises(ValueError): + self._list_tables_helper(None, table_name='wrong-format') + + def test_list_tables_failure_name_bad_before(self): + BAD_TABLE_NAME = ('nonempty-section-before' + + 'projects/' + self.PROJECT + + '/instances/' + self.INSTANCE_ID + + '/tables/' + self.TABLE_ID) + with self.assertRaises(ValueError): + self._list_tables_helper(self.TABLE_ID, table_name=BAD_TABLE_NAME) + + +class Test__prepare_create_request(unittest2.TestCase): + + def _callFUT(self, instance): + from gcloud.bigtable.instance import _prepare_create_request + return _prepare_create_request(instance) + + def test_it(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = u'DISPLAY_NAME' + SERVE_NODES = 8 + client = _Client(PROJECT) + + instance = Instance(INSTANCE_ID, client, display_name=DISPLAY_NAME) + request_pb = self._callFUT(instance) + self.assertTrue(isinstance(request_pb, + messages_v2_pb.CreateInstanceRequest)) + self.assertEqual(request_pb.instance_id, INSTANCE_ID) + self.assertEqual(request_pb.name, + 'projects/' + PROJECT) + self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) + self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) + + +class Test__parse_pb_any_to_native(unittest2.TestCase): + + def _callFUT(self, any_val, expected_type=None): + from gcloud.bigtable.instance import _parse_pb_any_to_native + return _parse_pb_any_to_native(any_val, expected_type=expected_type) + + def test_with_known_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + from gcloud.bigtable import instance as MUT + + TYPE_URL = 'type.googleapis.com/' + data_v2_pb2._CELL.full_name + fake_type_url_map = {TYPE_URL: data_v2_pb2.Cell} + + cell = data_v2_pb2.Cell( + timestamp_micros=0, + value=b'foobar', + ) + any_val = any_pb2.Any( + type_url=TYPE_URL, + value=cell.SerializeToString(), + ) + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + result = self._callFUT(any_val) + + self.assertEqual(result, cell) + + def test_with_create_instance_metadata(self): + from google.protobuf import any_pb2 + from google.protobuf.timestamp_pb2 import Timestamp + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + + TYPE_URL = ('type.googleapis.com/' + + messages_v2_pb._CREATEINSTANCEMETADATA.full_name) + metadata = messages_v2_pb.CreateInstanceMetadata( + request_time=Timestamp(seconds=1, nanos=1234), + finish_time=Timestamp(seconds=10, nanos=891011), + original_request=messages_v2_pb.CreateInstanceRequest( + name='foo', + instance_id='bar', + instance=data_v2_pb2.Instance( + display_name='quux', + ), + ), + ) + + any_val = any_pb2.Any( + type_url=TYPE_URL, + value=metadata.SerializeToString(), + ) + result = self._callFUT(any_val) + self.assertEqual(result, metadata) + + def test_unknown_type_url(self): + from 
google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable import instance as MUT + + fake_type_url_map = {} + any_val = any_pb2.Any() + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + with self.assertRaises(KeyError): + self._callFUT(any_val) + + def test_disagreeing_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable import instance as MUT + + TYPE_URL1 = 'foo' + TYPE_URL2 = 'bar' + fake_type_url_map = {TYPE_URL1: None} + any_val = any_pb2.Any(type_url=TYPE_URL2) + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + with self.assertRaises(ValueError): + self._callFUT(any_val, expected_type=TYPE_URL1) + + +class Test__process_operation(unittest2.TestCase): + + def _callFUT(self, operation_pb): + from gcloud.bigtable.instance import _process_operation + return _process_operation(operation_pb) + + def test_it(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable import instance as MUT + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + EXPECTED_OPERATION_ID = 234 + OPERATION_NAME = ( + 'operations/projects/%s/instances/%s/operations/%d' % + (PROJECT, INSTANCE_ID, EXPECTED_OPERATION_ID)) + + current_op = operations_pb2.Operation(name=OPERATION_NAME) + + # Create mocks. + request_metadata = messages_v2_pb.CreateInstanceMetadata() + parse_pb_any_called = [] + + def mock_parse_pb_any_to_native(any_val, expected_type=None): + parse_pb_any_called.append((any_val, expected_type)) + return request_metadata + + expected_operation_begin = object() + ts_to_dt_called = [] + + def mock_pb_timestamp_to_datetime(timestamp): + ts_to_dt_called.append(timestamp) + return expected_operation_begin + + # Exectute method with mocks in place. + with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, + _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): + operation_id, operation_begin = self._callFUT(current_op) + + # Check outputs. + self.assertEqual(operation_id, EXPECTED_OPERATION_ID) + self.assertTrue(operation_begin is expected_operation_begin) + + # Check mocks were used correctly. + self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) + self.assertEqual(ts_to_dt_called, [request_metadata.request_time]) + + def test_op_name_parsing_failure(self): + from google.longrunning import operations_pb2 + + operation_pb = operations_pb2.Operation(name='invalid') + with self.assertRaises(ValueError): + self._callFUT(operation_pb) + + +class _Client(object): + + def __init__(self, project, timeout_seconds=None): + self.project = project + self.project_name = 'projects/' + self.project + self.timeout_seconds = timeout_seconds + + def copy(self): + from copy import deepcopy + return deepcopy(self) + + def __eq__(self, other): + return (other.project == self.project and + other.project_name == self.project_name and + other.timeout_seconds == self.timeout_seconds) From 84ac521e37fdd61fe9bffe30c2dcb042cc75ce22 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 27 Jun 2016 22:09:40 -0400 Subject: [PATCH 036/103] Closes #1922, resolves issues with GAE and expanduser(). 
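
On Google App Engine the ``pwd`` module is unavailable, so
``os.path.expanduser('~/...')`` can raise ``ImportError`` while looking up
the home directory.  Guard that lookup and only read the config paths that
could actually be resolved.  A minimal, self-contained sketch of the
guarded pattern (helper name and constant value are illustrative, not part
of this change):

    import os

    CONFIG_PATH = '~/.config/gcloud/configurations/config_default'

    def _config_search_paths(config_path=CONFIG_PATH):
        """Return the config files worth probing on this platform."""
        paths = []
        try:
            # expanduser() needs the pwd module on POSIX; GAE blocks it.
            paths.append(os.path.expanduser(config_path))
        except ImportError:
            pass
        paths.append(os.path.join(os.getenv('APPDATA', ''), 'gcloud',
                                  'configurations', 'config_default'))
        return paths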
--- gcloud/_helpers.py | 11 +++++++++-- gcloud/test__helpers.py | 10 ++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/gcloud/_helpers.py b/gcloud/_helpers.py index 56ded4c46dcc..d869e2e3ecc7 100644 --- a/gcloud/_helpers.py +++ b/gcloud/_helpers.py @@ -188,12 +188,19 @@ def _default_service_project_id(): :rtype: str or ``NoneType`` :returns: Project-ID from default configuration file else ``None`` """ - full_config_path = os.path.expanduser(DEFAULT_CONFIGURATION_PATH) + search_paths = [] + # Workaround for GAE not supporting pwd which is used by expanduser. + try: + full_config_path = os.path.expanduser(DEFAULT_CONFIGURATION_PATH) + search_paths.append(full_config_path) + except ImportError: + full_config_path = '' win32_config_path = os.path.join(os.getenv('APPDATA', ''), 'gcloud', 'configurations', 'config_default') + search_paths.append(win32_config_path) config = configparser.RawConfigParser() - config.read([full_config_path, win32_config_path]) + config.read(search_paths) if config.has_section('core'): return config.get('core', 'project') diff --git a/gcloud/test__helpers.py b/gcloud/test__helpers.py index 14f901010a20..a7646af2e963 100644 --- a/gcloud/test__helpers.py +++ b/gcloud/test__helpers.py @@ -211,6 +211,7 @@ def callFUT(self, project_id=None): def mock_expanduser(path=''): if project_id and path.startswith('~'): + __import__('pwd') # Simulate actual expanduser imports. return self.temp_config_file return '' @@ -221,6 +222,15 @@ def test_read_from_cli_info(self): project_id = self.callFUT('test-project-id') self.assertEqual('test-project-id', project_id) + def test_gae_without_expanduser(self): + import sys + import pwd + del pwd + sys.modules['pwd'] = None # Blocks pwd from being imported. + project_id = self.callFUT('test-project-id') + self.assertEqual(None, project_id) + del sys.modules['pwd'] # Unblocks importing of pwd. + def test_info_value_not_present(self): project_id = self.callFUT() self.assertEqual(None, project_id) From e144b091fa3015797a41d0fad24da1c9cad63f03 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 21:47:44 -0400 Subject: [PATCH 037/103] Convert instance admin to V2 protobufs. 
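
Besides switching the hand-written ``client``, ``cluster``, and
``instance`` modules (and their tests) over to the v2 admin messages, the
regenerated ``_generated_v2`` modules now also carry the ``grpc.beta``
servicer and stub factories (``beta_create_BigtableInstanceAdmin_stub``
and friends).  Purely for illustration -- in real use the connection is
made through ``Client`` -- a stub could be built directly like so (host
and port are placeholders):

    from grpc.beta import implementations

    from gcloud.bigtable._generated_v2 import bigtable_instance_admin_pb2

    channel = implementations.insecure_channel('localhost', 8086)
    admin_stub = (
        bigtable_instance_admin_pb2.beta_create_BigtableInstanceAdmin_stub(
            channel))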
--- .../bigtable_instance_admin_pb2.py | 182 +++++ gcloud/bigtable/_generated_v2/bigtable_pb2.py | 119 ++++ .../_generated_v2/bigtable_table_admin_pb2.py | 122 ++++ gcloud/bigtable/client.py | 147 ++-- gcloud/bigtable/cluster.py | 220 ++---- gcloud/bigtable/instance.py | 85 ++- gcloud/bigtable/test_client.py | 193 +---- gcloud/bigtable/test_cluster.py | 662 ++++++------------ gcloud/bigtable/test_instance.py | 79 ++- gcloud/bigtable/test_table.py | 47 +- 10 files changed, 952 insertions(+), 904 deletions(-) diff --git a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py index 344918dc1c44..4d02b6e71bb8 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py @@ -613,4 +613,186 @@ DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True _CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +import abc +import six +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +class BetaBigtableInstanceAdminServicer(object): + """""" + def CreateInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListInstances(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CreateCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListClusters(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + +class BetaBigtableInstanceAdminStub(object): + """The interface to which stubs will conform.""" + def CreateInstance(self, request, timeout): + raise NotImplementedError() + CreateInstance.future = None + def GetInstance(self, request, timeout): + raise NotImplementedError() + GetInstance.future = None + def ListInstances(self, request, timeout): + raise NotImplementedError() + ListInstances.future = None + def UpdateInstance(self, request, timeout): + raise NotImplementedError() + UpdateInstance.future = None + def DeleteInstance(self, request, timeout): + raise NotImplementedError() + DeleteInstance.future = None + def CreateCluster(self, request, timeout): + raise NotImplementedError() + CreateCluster.future = None + def GetCluster(self, request, timeout): + raise NotImplementedError() + GetCluster.future = None + def ListClusters(self, request, timeout): + raise NotImplementedError() + ListClusters.future = None + def UpdateCluster(self, request, timeout): + raise NotImplementedError() + UpdateCluster.future = None + 
def DeleteCluster(self, request, timeout): + raise NotImplementedError() + DeleteCluster.future = None + +def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + request_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.longrunning.operations_pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 
'GetInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): face_utilities.unary_unary_inline(servicer.CreateInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): face_utilities.unary_unary_inline(servicer.DeleteInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): face_utilities.unary_unary_inline(servicer.GetInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): face_utilities.unary_unary_inline(servicer.ListInstances), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): face_utilities.unary_unary_inline(servicer.UpdateInstance), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + +def beta_create_BigtableInstanceAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + request_serializers = { 
+ ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.longrunning.operations_pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.longrunning.operations_pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.longrunning.operations_pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + } + cardinalities = { + 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, + 'CreateInstance': cardinality.Cardinality.UNARY_UNARY, + 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, + 'DeleteInstance': cardinality.Cardinality.UNARY_UNARY, + 'GetCluster': cardinality.Cardinality.UNARY_UNARY, + 'GetInstance': cardinality.Cardinality.UNARY_UNARY, + 'ListClusters': cardinality.Cardinality.UNARY_UNARY, + 'ListInstances': cardinality.Cardinality.UNARY_UNARY, + 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, + 'UpdateInstance': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = 
beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableInstanceAdmin', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_pb2.py index 5c9e39dc4e89..ffb5f5fa3eba 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_pb2.py @@ -804,4 +804,123 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) +import abc +import six +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +class BetaBigtableServicer(object): + """""" + def ReadRows(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SampleRowKeys(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRow(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRows(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CheckAndMutateRow(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ReadModifyWriteRow(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + +class BetaBigtableStub(object): + """The interface to which stubs will conform.""" + def ReadRows(self, request, timeout): + raise NotImplementedError() + def SampleRowKeys(self, request, timeout): + raise NotImplementedError() + def MutateRow(self, request, timeout): + raise NotImplementedError() + MutateRow.future = None + def MutateRows(self, request, timeout): + raise NotImplementedError() + def CheckAndMutateRow(self, request, timeout): + raise NotImplementedError() + CheckAndMutateRow.future = None + def ReadModifyWriteRow(self, request, timeout): + raise NotImplementedError() + ReadModifyWriteRow.future = None + +def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + request_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): 
google.bigtable.v2.bigtable_pb2.ReadRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysRequest.FromString, + } + response_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysResponse.SerializeToString, + } + method_implementations = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), + ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + +def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + request_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 
'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysResponse.FromString, + } + cardinalities = { + 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRows': cardinality.Cardinality.UNARY_STREAM, + 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, + 'ReadRows': cardinality.Cardinality.UNARY_STREAM, + 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py index bd695d5f6b3e..8a884a8b91e3 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py @@ -504,4 +504,126 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) +import abc +import six +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +class BetaBigtableTableAdminServicer(object): + """""" + def CreateTable(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListTables(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetTable(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteTable(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ModifyColumnFamilies(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DropRowRange(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + +class BetaBigtableTableAdminStub(object): + """The interface to which stubs will conform.""" + def CreateTable(self, request, timeout): + raise NotImplementedError() + CreateTable.future = None + def ListTables(self, request, timeout): + raise NotImplementedError() + ListTables.future = None + def GetTable(self, request, timeout): + raise NotImplementedError() + GetTable.future = None + def DeleteTable(self, request, timeout): + raise NotImplementedError() + DeleteTable.future = None + def ModifyColumnFamilies(self, request, timeout): + raise NotImplementedError() + ModifyColumnFamilies.future = None + def DropRowRange(self, request, timeout): + raise NotImplementedError() + DropRowRange.future = None + +def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, 
maximum_timeout=None): + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + request_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.CreateTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DeleteTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DropRowRangeRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.GetTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ModifyColumnFamiliesRequest.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): face_utilities.unary_unary_inline(servicer.DropRowRange), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): face_utilities.unary_unary_inline(servicer.ModifyColumnFamilies), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + +def beta_create_BigtableTableAdmin_stub(channel, host=None, 
metadata_transformer=None, pool=None, pool_size=None): + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + request_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.CreateTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DeleteTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DropRowRangeRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.GetTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ModifyColumnFamiliesRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.table_pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.table_pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.table_pb2.Table.FromString, + } + cardinalities = { + 'CreateTable': cardinality.Cardinality.UNARY_UNARY, + 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, + 'DropRowRange': cardinality.Cardinality.UNARY_UNARY, + 'GetTable': cardinality.Cardinality.UNARY_UNARY, + 'ListTables': cardinality.Cardinality.UNARY_UNARY, + 'ModifyColumnFamilies': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableTableAdmin', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 0b97922894e4..2ec8f6e89bd2 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -18,8 +18,8 @@ In the hierarchy of API concepts -* a :class:`Client` owns a :class:`.Cluster` -* a :class:`.Cluster` owns a :class:`Table ` +* a :class:`Client` owns a :class:`.Instance` +* a :class:`.Instance` 
owns a :class:`Table ` * a :class:`Table ` owns a :class:`ColumnFamily <.column_family.ColumnFamily>` * a :class:`Table ` owns a :class:`Row <.row.Row>` @@ -31,52 +31,47 @@ from grpc.beta import implementations -# Cluster admin service is V1-only (V2 provides instance admin instead) -from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as cluster_data_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_cluster_service_pb2 as cluster_service_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as cluster_messages_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as instance_admin_v2_pb2) # V1 table admin service -from gcloud.bigtable._generated import ( - bigtable_table_service_pb2 as table_service_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) # V1 data service -from gcloud.bigtable._generated import ( - bigtable_service_pb2 as data_service_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as data_v2_pb2) from gcloud.bigtable._generated import ( operations_grpc_pb2 as operations_grpc_v1_pb2) -from gcloud.bigtable.cluster import Cluster +from gcloud.bigtable.instance import Instance from gcloud.client import _ClientFactoryMixin from gcloud.client import _ClientProjectMixin from gcloud.credentials import get_credentials -TABLE_STUB_FACTORY_V1 = ( - table_service_v1_pb2.beta_create_BigtableTableService_stub) -TABLE_ADMIN_HOST_V1 = 'bigtabletableadmin.googleapis.com' +TABLE_STUB_FACTORY_V2 = ( + table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub) +TABLE_ADMIN_HOST_V2 = 'bigtabletableadmin.googleapis.com' """Table Admin API request host.""" -TABLE_ADMIN_PORT_V1 = 443 +TABLE_ADMIN_PORT_V2 = 443 """Table Admin API request port.""" -CLUSTER_STUB_FACTORY_V1 = ( - cluster_service_v1_pb2.beta_create_BigtableClusterService_stub) -CLUSTER_ADMIN_HOST_V1 = 'bigtableclusteradmin.googleapis.com' +INSTANCE_STUB_FACTORY_V2 = ( + instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub) +INSTANCE_ADMIN_HOST_V2 = 'bigtableclusteradmin.googleapis.com' """Cluster Admin API request host.""" -CLUSTER_ADMIN_PORT_V1 = 443 +INSTANCE_ADMIN_PORT_V2 = 443 """Cluster Admin API request port.""" -DATA_STUB_FACTORY_V1 = data_service_v1_pb2.beta_create_BigtableService_stub -DATA_API_HOST_V1 = 'bigtable.googleapis.com' +DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub +DATA_API_HOST_V2 = 'bigtable.googleapis.com' """Data API request host.""" -DATA_API_PORT_V1 = 443 +DATA_API_PORT_V2 = 443 """Data API request port.""" -OPERATIONS_STUB_FACTORY_V1 = operations_grpc_v1_pb2.beta_create_Operations_stub -OPERATIONS_API_HOST_V1 = CLUSTER_ADMIN_HOST_V1 -OPERATIONS_API_PORT_V1 = CLUSTER_ADMIN_PORT_V1 +OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v1_pb2.beta_create_Operations_stub +OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2 +OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2 ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' """Scope for interacting with the Cluster Admin and Table Admin APIs.""" @@ -120,7 +115,7 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): :type admin: bool :param admin: (Optional) Boolean indicating if the client will be used to - interact with the Cluster Admin or Table Admin APIs. This + interact with the Instance Admin or Table Admin APIs. This requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. 
:type user_agent: str @@ -203,7 +198,7 @@ def credentials(self): @property def project_name(self): - """Project name to be used with Cluster Admin API. + """Project name to be used with Instance Admin API. .. note:: @@ -235,7 +230,7 @@ def _data_stub(self): @property def _cluster_stub(self): - """Getter for the gRPC stub used for the Cluster Admin API. + """Getter for the gRPC stub used for the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. @@ -287,29 +282,29 @@ def _make_data_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, DATA_STUB_FACTORY_V1, - DATA_API_HOST_V1, DATA_API_PORT_V1) + return _make_stub(self, DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, DATA_API_PORT_V2) def _make_cluster_stub(self): - """Creates gRPC stub to make requests to the Cluster Admin API. + """Creates gRPC stub to make requests to the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, CLUSTER_STUB_FACTORY_V1, - CLUSTER_ADMIN_HOST_V1, CLUSTER_ADMIN_PORT_V1) + return _make_stub(self, INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2) def _make_operations_stub(self): """Creates gRPC stub to make requests to the Operations API. - These are for long-running operations of the Cluster Admin API, + These are for long-running operations of the Instance Admin API, hence the host and port matching. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, OPERATIONS_STUB_FACTORY_V1, - OPERATIONS_API_HOST_V1, OPERATIONS_API_PORT_V1) + return _make_stub(self, OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2) def _make_table_stub(self): """Creates gRPC stub to make requests to the Table Admin API. @@ -317,8 +312,8 @@ def _make_table_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, TABLE_STUB_FACTORY_V1, - TABLE_ADMIN_HOST_V1, TABLE_ADMIN_PORT_V1) + return _make_stub(self, TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2) def is_started(self): """Check if the client has been started. @@ -380,72 +375,22 @@ def __exit__(self, exc_type, exc_val, exc_t): """Stops the client as a context manager.""" self.stop() - def cluster(self, zone, cluster_id, display_name=None, serve_nodes=3): - """Factory to create a cluster associated with this client. - - :type zone: str - :param zone: The name of the zone where the cluster resides. + def instance(self, instance_id, display_name=None): + """Factory to create a instance associated with this client. - :type cluster_id: str - :param cluster_id: The ID of the cluster. + :type instance_id: str + :param instance_id: The ID of the instance. :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 + :param display_name: (Optional) The display name for the instance in + the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the - constructor, will fall back to the cluster ID. + constructor, will fall back to the instance ID. - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to 3. - - :rtype: :class:`.Cluster` - :returns: The cluster owned by this client. 
- """ - return Cluster(zone, cluster_id, self, - display_name=display_name, serve_nodes=serve_nodes) - - def list_zones(self): - """Lists zones associated with project. - - :rtype: list - :returns: The names (as :class:`str`) of the zones - :raises: :class:`ValueError ` if one of the - zones is not in ``OK`` state. - """ - request_pb = cluster_messages_v1_pb2.ListZonesRequest( - name=self.project_name) - # We expect a `.cluster_messages_v1_pb2.ListZonesResponse` - list_zones_response = self._cluster_stub.ListZones( - request_pb, self.timeout_seconds) - - result = [] - for zone in list_zones_response.zones: - if zone.status != cluster_data_v1_pb2.Zone.OK: - raise ValueError('Zone %s not in OK state' % ( - zone.display_name,)) - result.append(zone.display_name) - return result - - def list_clusters(self): - """Lists clusters owned by the project. - - :rtype: tuple - :returns: A pair of results, the first is a list of :class:`.Cluster` s - returned and the second is a list of strings (the failed - zones in the request). + :rtype: :class:`.Instance` + :returns: an instance owned by this client. """ - request_pb = cluster_messages_v1_pb2.ListClustersRequest( - name=self.project_name) - # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` - list_clusters_response = self._cluster_stub.ListClusters( - request_pb, self.timeout_seconds) - - failed_zones = [zone.display_name - for zone in list_clusters_response.failed_zones] - clusters = [Cluster.from_pb(cluster_pb, self) - for cluster_pb in list_clusters_response.clusters] - return clusters, failed_zones + return Instance(instance_id, self, display_name=display_name) class _MetadataPlugin(object): diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 85bd6966ad03..a65a5daa945a 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -19,28 +19,21 @@ from google.longrunning import operations_pb2 -from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'zones/(?P[^/]+)/clusters/' + r'instances/(?P[^/]+)/clusters/' r'(?P[a-z][-a-z0-9]*)$') -_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/zones/([^/]+)/' - r'clusters/([a-z][-a-z0-9]*)/operations/' - r'(?P\d+)$') -_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' -_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.cluster.v1.' -_CLUSTER_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateClusterMetadata' -_UPDATE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UpdateClusterMetadata' -_UNDELETE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UndeleteClusterMetadata' +_OPERATION_NAME_RE = re.compile(r'^operations/' + r'projects/([^/]+)/' + r'instances/([^/]+)/' + r'clusters/([a-z][-a-z0-9]*)/' + r'operations/(?P\d+)$') _TYPE_URL_MAP = { - _CLUSTER_CREATE_METADATA: messages_v1_pb2.CreateClusterMetadata, - _UPDATE_CREATE_METADATA: messages_v1_pb2.UpdateClusterMetadata, - _UNDELETE_CREATE_METADATA: messages_v1_pb2.UndeleteClusterMetadata, } DEFAULT_SERVE_NODES = 3 @@ -53,16 +46,13 @@ def _prepare_create_request(cluster): :type cluster: :class:`Cluster` :param cluster: The cluster to be created. 
- :rtype: :class:`.messages_v1_pb2.CreateClusterRequest` + :rtype: :class:`.messages_v2_pb2.CreateClusterRequest` :returns: The CreateCluster request object containing the cluster info. """ - zone_full_name = ('projects/' + cluster._client.project + - '/zones/' + cluster.zone) - return messages_v1_pb2.CreateClusterRequest( - name=zone_full_name, + return messages_v2_pb2.CreateClusterRequest( + name=cluster._instance.name, cluster_id=cluster.cluster_id, - cluster=data_v1_pb2.Cluster( - display_name=cluster.display_name, + cluster=data_v2_pb2.Cluster( serve_nodes=cluster.serve_nodes, ), ) @@ -98,9 +88,7 @@ def _process_operation(operation_pb): Create/Update/Undelete cluster request. :rtype: tuple - :returns: A pair of an integer and datetime stamp. The integer is the ID - of the operation (``operation_id``) and the timestamp when - the create operation began (``operation_begin``). + :returns: integer ID of the operation (``operation_id``). :raises: :class:`ValueError ` if the operation name doesn't match the :data:`_OPERATION_NAME_RE` regex. """ @@ -111,11 +99,7 @@ def _process_operation(operation_pb): operation_pb.name) operation_id = int(match.group('operation_id')) - request_metadata = _parse_pb_any_to_native(operation_pb.metadata) - operation_begin = _pb_timestamp_to_datetime( - request_metadata.request_time) - - return operation_id, operation_begin + return operation_id class Operation(object): @@ -131,17 +115,13 @@ class Operation(object): :type op_id: int :param op_id: The ID of the operation. - :type begin: :class:`datetime.datetime` - :param begin: The time when the operation was started. - :type cluster: :class:`Cluster` :param cluster: The cluster that created the operation. """ - def __init__(self, op_type, op_id, begin, cluster=None): + def __init__(self, op_type, op_id, cluster=None): self.op_type = op_type self.op_id = op_id - self.begin = begin self._cluster = cluster self._complete = False @@ -150,7 +130,6 @@ def __eq__(self, other): return False return (other.op_type == self.op_type and other.op_id == self.op_id and - other.begin == self.begin and other._cluster == self._cluster and other._complete == self._complete) @@ -172,8 +151,9 @@ def finished(self): '/operations/%d' % (self.op_id,)) request_pb = operations_pb2.GetOperationRequest(name=operation_name) # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb = self._cluster._client._operations_stub.GetOperation( - request_pb, self._cluster._client.timeout_seconds) + client = self._cluster._instance._client + operation_pb = client._operations_stub.GetOperation( + request_pb, client.timeout_seconds) if operation_pb.done: self._complete = True @@ -196,76 +176,67 @@ class Cluster(object): .. note:: For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_v1_pb2.STORAGE_SSD`. - - :type zone: str - :param zone: The name of the zone where the cluster resides. + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. :type cluster_id: str :param cluster_id: The ID of the cluster. - :type client: :class:`Client ` - :param client: The client that owns the cluster. Provides - authorization and a project ID. - - :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the cluster ID. + :type instance: :class:`.instance.Instance` + :param instance: The instance where the cluster resides. 
:type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. Defaults to :data:`DEFAULT_SERVE_NODES`. """ - def __init__(self, zone, cluster_id, client, - display_name=None, serve_nodes=DEFAULT_SERVE_NODES): - self.zone = zone + def __init__(self, cluster_id, instance, + serve_nodes=DEFAULT_SERVE_NODES): self.cluster_id = cluster_id - self.display_name = display_name or cluster_id + self._instance = instance self.serve_nodes = serve_nodes - self._client = client + self.location = None def _update_from_pb(self, cluster_pb): """Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`. """ - if not cluster_pb.display_name: # Simple field (string) - raise ValueError('Cluster protobuf does not contain display_name') if not cluster_pb.serve_nodes: # Simple field (int32) raise ValueError('Cluster protobuf does not contain serve_nodes') - self.display_name = cluster_pb.display_name self.serve_nodes = cluster_pb.serve_nodes + self.location = cluster_pb.location @classmethod - def from_pb(cls, cluster_pb, client): + def from_pb(cls, cluster_pb, instance): """Creates a cluster instance from a protobuf. :type cluster_pb: :class:`bigtable_cluster_data_pb2.Cluster` :param cluster_pb: A cluster protobuf object. - :type client: :class:`Client ` - :param client: The client that owns the cluster. + :type instance: :class:`.instance.Instance>` + :param instance: The instance that owns the cluster. :rtype: :class:`Cluster` :returns: The cluster parsed from the protobuf response. - :raises: :class:`ValueError ` if the cluster - name does not match - ``projects/{project}/zones/{zone}/clusters/{cluster_id}`` - or if the parsed project ID does not match the project ID - on the client. + :raises: + :class:`ValueError ` if the cluster + name does not match + ``projects/{project}/instances/{instance}/clusters/{cluster_id}`` + or if the parsed project ID does not match the project ID + on the client. """ match = _CLUSTER_NAME_RE.match(cluster_pb.name) if match is None: raise ValueError('Cluster protobuf name was not in the ' 'expected format.', cluster_pb.name) - if match.group('project') != client.project: + if match.group('project') != instance._client.project: raise ValueError('Project ID on cluster does not match the ' 'project ID on the client') + if match.group('instance') != instance.instance_id: + raise ValueError('Instance ID on cluster does not match the ' + 'instance ID on the client') - result = cls(match.group('zone'), match.group('cluster_id'), client) + result = cls(match.group('cluster_id'), instance) result._update_from_pb(cluster_pb) return result @@ -278,9 +249,8 @@ def copy(self): :rtype: :class:`.Cluster` :returns: A copy of the current cluster. """ - new_client = self._client.copy() - return self.__class__(self.zone, self.cluster_id, new_client, - display_name=self.display_name, + new_instance = self._instance.copy() + return self.__class__(self.cluster_id, new_instance, serve_nodes=self.serve_nodes) @property @@ -288,43 +258,41 @@ def name(self): """Cluster name used in requests. .. note:: - This property will not change if ``zone`` and ``cluster_id`` do not, - but the return value is not cached. + This property will not change if ``_instance`` and ``cluster_id`` + do not, but the return value is not cached. The cluster name is of the form - ``"projects/{project}/zones/{zone}/clusters/{cluster_id}"`` + ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` :rtype: str :returns: The cluster name. 
""" - return (self._client.project_name + '/zones/' + self.zone + - '/clusters/' + self.cluster_id) + return self._instance.name + '/clusters/' + self.cluster_id def __eq__(self, other): if not isinstance(other, self.__class__): return False # NOTE: This does not compare the configuration values, such as - # the serve_nodes or display_name. Instead, it only compares - # identifying values zone, cluster ID and client. This is + # the serve_nodes. Instead, it only compares + # identifying values instance, cluster ID and client. This is # intentional, since the same cluster can be in different states - # if not synchronized. Clusters with similar zone/cluster + # if not synchronized. Clusters with similar instance/cluster # settings but different clients can't be used in the same way. - return (other.zone == self.zone and - other.cluster_id == self.cluster_id and - other._client == self._client) + return (other.cluster_id == self.cluster_id and + other._instance == self._instance) def __ne__(self, other): return not self.__eq__(other) def reload(self): """Reload the metadata for this cluster.""" - request_pb = messages_v1_pb2.GetClusterRequest(name=self.name) + request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.GetCluster( - request_pb, self._client.timeout_seconds) + cluster_pb = self._instance._client._cluster_stub.GetCluster( + request_pb, self._instance._client.timeout_seconds) - # NOTE: _update_from_pb does not check that the project, zone and + # NOTE: _update_from_pb does not check that the project, instance and # cluster ID on the response match the request. self._update_from_pb(cluster_pb) @@ -333,14 +301,13 @@ def create(self): .. note:: - Uses the ``project``, ``zone`` and ``cluster_id`` on the current - :class:`Cluster` in addition to the ``display_name`` and - ``serve_nodes``. If you'd like to change them before creating, - reset the values via + Uses the ``project``, ``instance`` and ``cluster_id`` on the + current :class:`Cluster` in addition to the ``serve_nodes``. + To change them before creating, reset the values via .. code:: python - cluster.display_name = 'New display name' + cluster.serve_nodes = 8 cluster.cluster_id = 'i-changed-my-mind' before calling :meth:`create`. @@ -351,24 +318,23 @@ def create(self): """ request_pb = _prepare_create_request(self) # We expect a `google.longrunning.operations_pb2.Operation`. - cluster_pb = self._client._cluster_stub.CreateCluster( - request_pb, self._client.timeout_seconds) + operation_pb = self._instance._client._cluster_stub.CreateCluster( + request_pb, self._instance._client.timeout_seconds) - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('create', op_id, op_begin, cluster=self) + op_id = _process_operation(operation_pb) + return Operation('create', op_id, cluster=self) def update(self): """Update this cluster. .. note:: - Updates the ``display_name`` and ``serve_nodes``. If you'd like to + Updates the ``serve_nodes``. If you'd like to change them before updating, reset the values via .. code:: python - cluster.display_name = 'New display name' - cluster.serve_nodes = 3 + cluster.serve_nodes = 8 before calling :meth:`update`. @@ -376,17 +342,16 @@ def update(self): :returns: The long-running operation corresponding to the update operation. 
""" - request_pb = data_v1_pb2.Cluster( + request_pb = data_v2_pb2.Cluster( name=self.name, - display_name=self.display_name, serve_nodes=self.serve_nodes, ) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.UpdateCluster( - request_pb, self._client.timeout_seconds) + # Ignore expected `._generated.bigtable_cluster_data_pb2.Cluster`. + operation_pb = self._instance._client._cluster_stub.UpdateCluster( + request_pb, self._instance._client.timeout_seconds) - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('update', op_id, op_begin, cluster=self) + op_id = _process_operation(operation_pb) + return Operation('update', op_id, cluster=self) def delete(self): """Delete this cluster. @@ -413,40 +378,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_v1_pb2.DeleteClusterRequest(name=self.name) + request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) # We expect a `google.protobuf.empty_pb2.Empty` - self._client._cluster_stub.DeleteCluster( - request_pb, self._client.timeout_seconds) - - def undelete(self): - """Undelete this cluster. - - Cancels the scheduled deletion of an cluster and begins preparing it to - resume serving. The returned operation will also be embedded as the - cluster's ``current_operation``. - - Immediately upon completion of this request: - - * The cluster's ``delete_time`` field will be unset, protecting it from - automatic deletion. - - Until completion of the returned operation: - - * The operation cannot be cancelled. - - Upon completion of the returned operation: - - * Billing for the cluster's resources will resume. - * All tables within the cluster will be available. - - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - undelete operation. - """ - request_pb = messages_v1_pb2.UndeleteClusterRequest(name=self.name) - # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb2 = self._client._cluster_stub.UndeleteCluster( - request_pb, self._client.timeout_seconds) - - op_id, op_begin = _process_operation(operation_pb2) - return Operation('undelete', op_id, op_begin, cluster=self) + self._instance._client._cluster_stub.DeleteCluster( + request_pb, self._instance._client.timeout_seconds) diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py index cb5b03c3f266..ba29e43a57c2 100644 --- a/gcloud/bigtable/instance.py +++ b/gcloud/bigtable/instance.py @@ -23,14 +23,15 @@ from gcloud.bigtable._generated_v2 import ( instance_pb2 as data_v2_pb2) from gcloud.bigtable._generated_v2 import ( - bigtable_instance_admin_pb2 as messages_v1_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2) from gcloud.bigtable._generated_v2 import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2) + bigtable_table_admin_pb2 as table_messages_v2_pb2) +from gcloud.bigtable.cluster import Cluster from gcloud.bigtable.table import Table _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'instances/(?P[a-z][-a-z0-9]*)$') + r'instances/(?P[a-z][-a-z0-9]*)$') _OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' r'instances/([a-z][-a-z0-9]*)/operations/' r'(?P\d+)$') @@ -38,7 +39,7 @@ _ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.' 
_INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata' _TYPE_URL_MAP = { - _INSTANCE_CREATE_METADATA: messages_v1_pb2.CreateInstanceMetadata, + _INSTANCE_CREATE_METADATA: messages_v2_pb2.CreateInstanceMetadata, } @@ -48,11 +49,11 @@ def _prepare_create_request(instance): :type instance: :class:`Instance` :param instance: The instance to be created. - :rtype: :class:`.messages_v1_pb2.CreateInstanceRequest` + :rtype: :class:`.messages_v2_pb2.CreateInstanceRequest` :returns: The CreateInstance request object containing the instance info. """ parent_name = ('projects/' + instance._client.project) - return messages_v1_pb2.CreateInstanceRequest( + return messages_v2_pb2.CreateInstanceRequest( name=parent_name, instance_id=instance.instance_id, instance=data_v2_pb2.Instance( @@ -211,17 +212,6 @@ def __init__(self, instance_id, client, self.display_name = display_name or instance_id self._client = client - def table(self, table_id): - """Factory to create a table associated with this instance. - - :type table_id: str - :param table_id: The ID of the table. - - :rtype: :class:`Table ` - :returns: The table owned by this instance. - """ - return Table(table_id, self) - def _update_from_pb(self, instance_pb): """Refresh self from the server-provided protobuf. @@ -289,7 +279,7 @@ def name(self): :rtype: str :returns: The instance name. """ - return (self._client.project_name + '/instances/' + self.instance_id) + return self._client.project_name + '/instances/' + self.instance_id def __eq__(self, other): if not isinstance(other, self.__class__): @@ -308,7 +298,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this instance.""" - request_pb = messages_v1_pb2.GetInstanceRequest(name=self.name) + request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name) # We expect `data_v2_pb2.Instance`. instance_pb = self._client._instance_stub.GetInstance( request_pb, self._client.timeout_seconds) @@ -370,12 +360,14 @@ def update(self): def delete(self): """Delete this instance. - Marks a instance and all of its tables for permanent deletion in 7 days. + Marks a instance and all of its tables for permanent deletion + in 7 days. Immediately upon completion of the request: * Billing will cease for all of the instance's reserved resources. - * The instance's ``delete_time`` field will be set 7 days in the future. + * The instance's ``delete_time`` field will be set 7 days in + the future. Soon afterward: @@ -392,11 +384,56 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_v1_pb2.DeleteInstanceRequest(name=self.name) + request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name) # We expect a `google.protobuf.empty_pb2.Empty` self._client._instance_stub.DeleteInstance( request_pb, self._client.timeout_seconds) + def cluster(self, cluster_id, serve_nodes=3): + """Factory to create a cluster associated with this client. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + Defaults to 3. + + :rtype: :class:`.Cluster` + :returns: The cluster owned by this client. + """ + return Cluster(cluster_id, self, serve_nodes=serve_nodes) + + def list_clusters(self): + """Lists clusters in this instance. + + :rtype: tuple + :returns: A pair of results, the first is a list of :class:`.Cluster` s + returned and the second is a list of strings (the failed + locations in the request). 
+ """ + request_pb = messages_v2_pb2.ListClustersRequest(name=self.name) + # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` + list_clusters_response = self._client._instance_stub.ListClusters( + request_pb, self._client.timeout_seconds) + + failed_locations = [ + location for location in list_clusters_response.failed_locations] + clusters = [Cluster.from_pb(cluster_pb, self) + for cluster_pb in list_clusters_response.clusters] + return clusters, failed_locations + + def table(self, table_id): + """Factory to create a table associated with this instance. + + :type table_id: str + :param table_id: The ID of the table. + + :rtype: :class:`Table ` + :returns: The table owned by this instance. + """ + return Table(table_id, self) + def list_tables(self): """List the tables in this instance. @@ -405,8 +442,8 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. """ - request_pb = table_messages_v1_pb2.ListTablesRequest(name=self.name) - # We expect a `table_messages_v1_pb2.ListTablesResponse` + request_pb = table_messages_v2_pb2.ListTablesRequest(name=self.name) + # We expect a `table_messages_v2_pb2.ListTablesResponse` table_list_pb = self._client._table_stub.ListTables( request_pb, self._client.timeout_seconds) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index ccd276fdb750..eeff14a5aab4 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -289,9 +289,9 @@ def test_table_stub_unset_failure(self): def test__make_data_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import DATA_API_HOST_V1 - from gcloud.bigtable.client import DATA_API_PORT_V1 - from gcloud.bigtable.client import DATA_STUB_FACTORY_V1 + from gcloud.bigtable.client import DATA_API_HOST_V2 + from gcloud.bigtable.client import DATA_API_PORT_V2 + from gcloud.bigtable.client import DATA_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -311,18 +311,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - DATA_STUB_FACTORY_V1, - DATA_API_HOST_V1, - DATA_API_PORT_V1, + DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, + DATA_API_PORT_V2, ), ]) def test__make_cluster_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST_V1 - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT_V1 - from gcloud.bigtable.client import CLUSTER_STUB_FACTORY_V1 + from gcloud.bigtable.client import INSTANCE_ADMIN_HOST_V2 + from gcloud.bigtable.client import INSTANCE_ADMIN_PORT_V2 + from gcloud.bigtable.client import INSTANCE_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -342,18 +342,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - CLUSTER_STUB_FACTORY_V1, - CLUSTER_ADMIN_HOST_V1, - CLUSTER_ADMIN_PORT_V1, + INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, + INSTANCE_ADMIN_PORT_V2, ), ]) def test__make_operations_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import OPERATIONS_API_HOST_V1 - from gcloud.bigtable.client import OPERATIONS_API_PORT_V1 - from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V1 + from gcloud.bigtable.client import OPERATIONS_API_HOST_V2 + from gcloud.bigtable.client import OPERATIONS_API_PORT_V2 + from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V2 
credentials = _Credentials() project = 'PROJECT' @@ -373,18 +373,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - OPERATIONS_STUB_FACTORY_V1, - OPERATIONS_API_HOST_V1, - OPERATIONS_API_PORT_V1, + OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, + OPERATIONS_API_PORT_V2, ), ]) def test__make_table_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import TABLE_ADMIN_HOST_V1 - from gcloud.bigtable.client import TABLE_ADMIN_PORT_V1 - from gcloud.bigtable.client import TABLE_STUB_FACTORY_V1 + from gcloud.bigtable.client import TABLE_ADMIN_HOST_V2 + from gcloud.bigtable.client import TABLE_ADMIN_PORT_V2 + from gcloud.bigtable.client import TABLE_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -404,9 +404,9 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - TABLE_STUB_FACTORY_V1, - TABLE_ADMIN_HOST_V1, - TABLE_ADMIN_PORT_V1, + TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, + TABLE_ADMIN_PORT_V2, ), ]) @@ -521,144 +521,21 @@ def test_stop_while_stopped(self): # Make sure the cluster stub did not change. self.assertEqual(client._cluster_stub_internal, cluster_stub) - def test_cluster_factory(self): - from gcloud.bigtable.cluster import Cluster + def test_instance_factory(self): + from gcloud.bigtable.instance import Instance - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display-name' - serve_nodes = 42 - cluster = client.cluster(zone, cluster_id, display_name=display_name, - serve_nodes=serve_nodes) - self.assertTrue(isinstance(cluster, Cluster)) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) - - def _list_zones_helper(self, zone_status): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 281330 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = messages_v1_pb2.ListZonesRequest( - name='projects/' + project, - ) - - # Create response_pb - zone1 = 'foo' - zone2 = 'bar' - response_pb = messages_v1_pb2.ListZonesResponse( - zones=[ - data_v1_pb2.Zone(display_name=zone1, status=zone_status), - data_v1_pb2.Zone(display_name=zone2, status=zone_status), - ], - ) - - # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = [zone1, zone2] - - # Perform the method and check the result. 
- result = client.list_zones() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListZones', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_zones(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - self._list_zones_helper(data_v1_pb2.Zone.OK) - - def test_list_zones_failure(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - with self.assertRaises(ValueError): - self._list_zones_helper(data_v1_pb2.Zone.EMERGENCY_MAINENANCE) - - def test_list_clusters(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub + client = self._makeOne(project=PROJECT, credentials=credentials) - credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 8004 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = messages_v1_pb2.ListClustersRequest( - name='projects/' + project, - ) - - # Create response_pb - zone = 'foo' - failed_zone = 'bar' - cluster_id1 = 'cluster-id1' - cluster_id2 = 'cluster-id2' - cluster_name1 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id1) - cluster_name2 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id2) - response_pb = messages_v1_pb2.ListClustersResponse( - failed_zones=[ - data_v1_pb2.Zone(display_name=failed_zone), - ], - clusters=[ - data_v1_pb2.Cluster( - name=cluster_name1, - display_name=cluster_name1, - serve_nodes=3, - ), - data_v1_pb2.Cluster( - name=cluster_name2, - display_name=cluster_name2, - serve_nodes=3, - ), - ], - ) - - # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - failed_zones = [failed_zone] - clusters = [ - client.cluster(zone, cluster_id1), - client.cluster(zone, cluster_id2), - ] - expected_result = (clusters, failed_zones) - - # Perform the method and check the result. 
- result = client.list_clusters() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListClusters', - (request_pb, timeout_seconds), - {}, - )]) + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) + self.assertTrue(isinstance(instance, Instance)) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertTrue(instance._client is client) class Test_MetadataPlugin(unittest2.TestCase): diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index d884ae8783dc..6ddca98bf92e 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -26,15 +26,12 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def _constructor_test_helper(self, cluster=None): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation = self._makeOne(op_type, op_id, cluster=cluster) self.assertEqual(operation.op_type, op_type) self.assertEqual(operation.op_id, op_id) - self.assertEqual(operation.begin, begin) self.assertEqual(operation._cluster, cluster) self.assertFalse(operation._complete) @@ -46,13 +43,11 @@ def test_constructor_explicit_cluster(self): self._constructor_test_helper(cluster=cluster) def test___eq__(self): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation1 = self._makeOne(op_type, op_id, cluster=cluster) + operation2 = self._makeOne(op_type, op_id, cluster=cluster) self.assertEqual(operation1, operation2) def test___eq__type_differ(self): @@ -61,13 +56,11 @@ def test___eq__type_differ(self): self.assertNotEqual(operation1, operation2) def test___ne__same_value(self): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation1 = self._makeOne(op_type, op_id, cluster=cluster) + operation2 = self._makeOne(op_type, op_id, cluster=cluster) comparison_val = (operation1 != operation2) self.assertFalse(comparison_val) @@ -83,27 +76,27 @@ def test_finished_without_operation(self): operation.finished() def _finished_helper(self, done): - import datetime from google.longrunning import operations_pb2 from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import Cluster - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - op_type = 'fake-op' - op_id = 789 - begin = datetime.datetime(2015, 10, 22, 1, 1) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + OP_TYPE = 'fake-op' + OP_ID = 789 timeout_seconds = 1 - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = Cluster(zone, cluster_id, client) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) + client = _Client(PROJECT, timeout_seconds=timeout_seconds) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance) + operation = self._makeOne(OP_TYPE, OP_ID, cluster=cluster) # Create request_pb - op_name = ('operations/projects/' + project + '/zones/' + - zone + '/clusters/' + cluster_id + - '/operations/%d' % 
(op_id,)) + op_name = ('operations/projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID + + '/operations/%d' % (OP_ID,)) request_pb = operations_pb2.GetOperationRequest(name=op_name) # Create response_pb @@ -139,6 +132,14 @@ def test_finished_not_done(self): class TestCluster(unittest2.TestCase): + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + CLUSTER_NAME = ('projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID) + TIMEOUT_SECONDS = 123 + def _getTargetClass(self): from gcloud.bigtable.cluster import Cluster return Cluster @@ -147,220 +148,176 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_constructor_defaults(self): - zone = 'zone' - cluster_id = 'cluster-id' - client = object() + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(zone, cluster_id, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, cluster_id) - self.assertEqual(cluster.serve_nodes, 3) - self.assertTrue(cluster._client is client) + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_constructor_non_default(self): - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display_name' - serve_nodes = 8 - client = object() - - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) + SERVE_NODES = 8 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test_copy(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display_name' - serve_nodes = 8 - - client = _Client(project) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) + SERVE_NODES = 8 + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) new_cluster = cluster.copy() # Make sure the client copy succeeded. - self.assertFalse(new_cluster._client is client) - self.assertEqual(new_cluster._client, client) + self.assertFalse(new_cluster._instance is instance) + self.assertEqual(new_cluster.serve_nodes, SERVE_NODES) # Make sure the client got copied to a new instance. 
self.assertFalse(cluster is new_cluster) self.assertEqual(cluster, new_cluster) def test__update_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - display_name = 'display_name' - serve_nodes = 8 - cluster_pb = data_v1_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, + SERVE_NODES = 8 + cluster_pb = _ClusterPB( + serve_nodes=SERVE_NODES, ) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) + cluster = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - - def test__update_from_pb_no_display_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - - cluster_pb = data_v1_pb2.Cluster(serve_nodes=331) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - with self.assertRaises(ValueError): - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test__update_from_pb_no_serve_nodes(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - cluster_pb = data_v1_pb2.Cluster(display_name='name') - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) + cluster_pb = _ClusterPB() + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) with self.assertRaises(ValueError): cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - client = _Client(project=project) - - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster_pb = data_v1_pb2.Cluster( - name=cluster_name, - display_name=cluster_id, - serve_nodes=331, + SERVE_NODES = 331 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, ) klass = self._getTargetClass() - cluster = klass.from_pb(cluster_pb, client) + cluster = klass.from_pb(cluster_pb, instance) self.assertTrue(isinstance(cluster, klass)) - self.assertEqual(cluster._client, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test_from_pb_bad_cluster_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - - cluster_name = 'INCORRECT_FORMAT' - cluster_pb = 
data_v1_pb2.Cluster(name=cluster_name) + BAD_CLUSTER_NAME = 'INCORRECT_FORMAT' + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME) klass = self._getTargetClass() with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, None) + klass.from_pb(cluster_pb, instance) def test_from_pb_project_mistmatch(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(ALT_PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - alt_project = 'ALT_PROJECT' - client = _Client(project=alt_project) + cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) - self.assertNotEqual(project, alt_project) + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(cluster_pb, instance) - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster_pb = data_v1_pb2.Cluster(name=cluster_name) + def test_from_pb_instance_mistmatch(self): + ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' + client = _Client(self.PROJECT) + instance = _Instance(ALT_INSTANCE_ID, client) + + self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID) + + cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) klass = self._getTargetClass() with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, client) + klass.from_pb(cluster_pb, instance) def test_name_property(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - client = _Client(project=project) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(zone, cluster_id, client) - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - self.assertEqual(cluster.name, cluster_name) + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.name, self.CLUSTER_NAME) def test___eq__(self): - zone = 'zone' - cluster_id = 'cluster_id' - client = object() - cluster1 = self._makeOne(zone, cluster_id, client) - cluster2 = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster2 = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster1, cluster2) def test___eq__type_differ(self): - cluster1 = self._makeOne('zone', 'cluster_id', 'client') + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) cluster2 = object() self.assertNotEqual(cluster1, cluster2) def test___ne__same_value(self): - zone = 'zone' - cluster_id = 'cluster_id' - client = object() - cluster1 = self._makeOne(zone, cluster_id, client) - cluster2 = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster2 = self._makeOne(self.CLUSTER_ID, instance) comparison_val = (cluster1 != cluster2) self.assertFalse(comparison_val) def test___ne__(self): - cluster1 = self._makeOne('zone1', 'cluster_id1', 'client1') - cluster2 = self._makeOne('zone2', 'cluster_id2', 'client2') + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne('cluster_id1', instance) + cluster2 = self._makeOne('cluster_id2', instance) 
self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 123 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + SERVE_NODES = 31 + LOCATION = 'LOCATION' + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_v1_pb2.GetClusterRequest(name=cluster_name) + request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME) # Create response_pb - serve_nodes = 31 - display_name = u'hey-hi-hello' - response_pb = data_v1_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, + response_pb = _ClusterPB( + serve_nodes=SERVE_NODES, + location=LOCATION, ) # Patch the stub used by the API method. @@ -371,55 +328,46 @@ def test_reload(self): # Check Cluster optional config values before. self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - self.assertEqual(cluster.display_name, cluster_id) # Perform the method and check the result. result = cluster.reload() self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'GetCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) # Check Cluster optional config values before. - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertEqual(cluster.display_name, display_name) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.location, LOCATION) def test_create(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 578 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb. Just a mock since we monkey patch # _prepare_create_request request_pb = object() # Create response_pb - op_id = 5678 - op_begin = object() - op_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, op_id)) - current_op = operations_pb2.Operation(name=op_name) - response_pb = data_v1_pb2.Cluster(current_operation=current_op) + OP_ID = 5678 + OP_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. client._cluster_stub = stub = _FakeStub(response_pb) # Create expected_result. 
- expected_result = MUT.Operation('create', op_id, op_begin, - cluster=cluster) + expected_result = MUT.Operation('create', OP_ID, cluster=cluster) # Create the mocks. prep_create_called = [] @@ -432,7 +380,7 @@ def mock_prep_create_req(cluster): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return op_id, op_begin + return OP_ID # Perform the method and check the result. with _Monkey(MUT, _prepare_create_request=mock_prep_create_req, @@ -442,60 +390,47 @@ def mock_process_operation(operation_pb): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'CreateCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) self.assertEqual(prep_create_called, [cluster]) - self.assertEqual(process_operation_called, [current_op]) + self.assertEqual(process_operation_called, [response_pb]) def test_update(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - serve_nodes = 81 - display_name = 'display_name' - timeout_seconds = 9 + SERVE_NODES = 81 - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = data_v1_pb2.Cluster( - name=cluster_name, - display_name=display_name, - serve_nodes=serve_nodes, + request_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, ) # Create response_pb - current_op = operations_pb2.Operation() - response_pb = data_v1_pb2.Cluster(current_operation=current_op) + response_pb = operations_pb2.Operation() # Patch the stub used by the API method. client._cluster_stub = stub = _FakeStub(response_pb) # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('update', op_id, op_begin, - cluster=cluster) + OP_ID = 5678 + expected_result = MUT.Operation('update', OP_ID, cluster=cluster) # Create mocks process_operation_called = [] def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return op_id, op_begin + return OP_ID # Perform the method and check the result. 
with _Monkey(MUT, _process_operation=mock_process_operation): @@ -504,29 +439,21 @@ def mock_process_operation(operation_pb): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'UpdateCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - self.assertEqual(process_operation_called, [current_op]) + self.assertEqual(process_operation_called, [response_pb]) def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 57 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_v1_pb2.DeleteClusterRequest(name=cluster_name) + request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME) # Create response_pb response_pb = empty_pb2.Empty() @@ -543,62 +470,10 @@ def test_delete(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'DeleteCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - def test_undelete(self): - from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 78 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_v1_pb2.UndeleteClusterRequest(name=cluster_name) - - # Create response_pb - response_pb = operations_pb2.Operation() - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('undelete', op_id, op_begin, - cluster=cluster) - - # Create the mocks. - process_operation_called = [] - - def mock_process_operation(operation_pb): - process_operation_called.append(operation_pb) - return op_id, op_begin - - # Perform the method and check the result. 
- with _Monkey(MUT, _process_operation=mock_process_operation): - result = cluster.undelete() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UndeleteCluster', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(process_operation_called, [response_pb]) - class Test__prepare_create_request(unittest2.TestCase): @@ -607,30 +482,23 @@ def _callFUT(self, cluster): return _prepare_create_request(cluster) def test_it(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.cluster import Cluster - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - display_name = u'DISPLAY_NAME' - serve_nodes = 8 - client = _Client(project) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + SERVE_NODES = 8 + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) - cluster = Cluster(zone, cluster_id, client, - display_name=display_name, serve_nodes=serve_nodes) request_pb = self._callFUT(cluster) - self.assertTrue(isinstance(request_pb, - messages_v1_pb2.CreateClusterRequest)) - self.assertEqual(request_pb.cluster_id, cluster_id) - self.assertEqual(request_pb.name, - 'projects/' + project + '/zones/' + zone) - self.assertTrue(isinstance(request_pb.cluster, data_v1_pb2.Cluster)) - self.assertEqual(request_pb.cluster.display_name, display_name) - self.assertEqual(request_pb.cluster.serve_nodes, serve_nodes) + + self.assertEqual(request_pb.cluster_id, CLUSTER_ID) + self.assertEqual(request_pb.name, instance.name) + self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) class Test__parse_pb_any_to_native(unittest2.TestCase): @@ -642,17 +510,16 @@ def _callFUT(self, any_val, expected_type=None): def test_with_known_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable import cluster as MUT - type_url = 'type.googleapis.com/' + data_v1_pb2._CELL.full_name - fake_type_url_map = {type_url: data_v1_pb2.Cell} - - cell = data_v1_pb2.Cell( + cell = _CellPB( timestamp_micros=0, value=b'foobar', ) + + type_url = 'type.googleapis.com/' + cell.DESCRIPTOR.full_name + fake_type_url_map = {type_url: cell.__class__} + any_val = any_pb2.Any( type_url=type_url, value=cell.SerializeToString(), @@ -662,83 +529,6 @@ def test_with_known_type_url(self): self.assertEqual(result, cell) - def test_with_create_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - - type_url = ('type.googleapis.com/' + - messages_v1_pb2._CREATECLUSTERMETADATA.full_name) - metadata = messages_v1_pb2.CreateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - original_request=messages_v1_pb2.CreateClusterRequest( - name='foo', - cluster_id='bar', - cluster=data_v1_pb2.Cluster( - display_name='quux', - serve_nodes=1337, - ), - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - 
self.assertEqual(result, metadata) - - def test_with_update_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - - type_url = ('type.googleapis.com/' + - messages_v1_pb2._UPDATECLUSTERMETADATA.full_name) - metadata = messages_v1_pb2.UpdateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - cancel_time=Timestamp(seconds=100, nanos=76543), - original_request=data_v1_pb2.Cluster( - display_name='the-end', - serve_nodes=42, - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_undelete_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - - type_url = ('type.googleapis.com/' + - messages_v1_pb2._UNDELETECLUSTERMETADATA.full_name) - metadata = messages_v1_pb2.UndeleteClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - def test_unknown_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey @@ -772,58 +562,72 @@ def _callFUT(self, operation_pb): def test_it(self): from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - expected_operation_id = 234 - operation_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, - expected_operation_id)) - - current_op = operations_pb2.Operation(name=operation_name) - # Create mocks. - request_metadata = messages_v1_pb2.CreateClusterMetadata() - parse_pb_any_called = [] + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + EXPECTED_OPERATION_ID = 234 + OPERATION_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (PROJECT, INSTANCE_ID, CLUSTER_ID, EXPECTED_OPERATION_ID)) - def mock_parse_pb_any_to_native(any_val, expected_type=None): - parse_pb_any_called.append((any_val, expected_type)) - return request_metadata - - expected_operation_begin = object() - ts_to_dt_called = [] - - def mock_pb_timestamp_to_datetime(timestamp): - ts_to_dt_called.append(timestamp) - return expected_operation_begin + operation_pb = operations_pb2.Operation(name=OPERATION_NAME) # Exectute method with mocks in place. - with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, - _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): - operation_id, operation_begin = self._callFUT(current_op) + operation_id = self._callFUT(operation_pb) # Check outputs. - self.assertEqual(operation_id, expected_operation_id) - self.assertTrue(operation_begin is expected_operation_begin) - - # Check mocks were used correctly. 
- self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) - self.assertEqual(ts_to_dt_called, [request_metadata.request_time]) + self.assertEqual(operation_id, EXPECTED_OPERATION_ID) def test_op_name_parsing_failure(self): from google.longrunning import operations_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - current_op = operations_pb2.Operation(name='invalid') - cluster = data_v1_pb2.Cluster(current_operation=current_op) + operation_pb = operations_pb2.Operation(name='invalid') with self.assertRaises(ValueError): - self._callFUT(cluster) + self._callFUT(operation_pb) + + +def _CellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) + + +def _ClusterPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as instance_v2_pb2) + return instance_v2_pb2.Cluster(*args, **kw) + + +def _DeleteClusterRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.DeleteClusterRequest(*args, **kw) + + +def _GetClusterRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.GetClusterRequest(*args, **kw) + + +class _Instance(object): + + def __init__(self, instance_id, client): + self.instance_id = instance_id + self._client = client + + @property + def name(self): + return 'projects/%s/instances/%s' % ( + self._client.project, self.instance_id) + + def copy(self): + return self.__class__(self.instance_id, self._client) + + def __eq__(self, other): + return (other.instance_id == self.instance_id and + other._client == self._client) class _Client(object): @@ -833,10 +637,6 @@ def __init__(self, project, timeout_seconds=None): self.project_name = 'projects/' + self.project self.timeout_seconds = timeout_seconds - def copy(self): - from copy import deepcopy - return deepcopy(self) - def __eq__(self, other): return (other.project == self.project and other.project_name == self.project_name and diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py index ceb8df151eb7..fbe2f384cfb2 100644 --- a/gcloud/bigtable/test_instance.py +++ b/gcloud/bigtable/test_instance.py @@ -139,8 +139,9 @@ class TestInstance(unittest2.TestCase): DISPLAY_NAME = 'display_name' OP_ID = 8915 OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % - (PROJECT, INSTANCE_ID, OP_ID)) + (PROJECT, INSTANCE_ID, OP_ID)) TABLE_ID = 'table_id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID TIMEOUT_SECONDS = 1 def _getTargetClass(self): @@ -163,7 +164,7 @@ def test_constructor_non_default(self): client = object() instance = self._makeOne(self.INSTANCE_ID, client, - display_name=display_name) + display_name=display_name) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, display_name) self.assertTrue(instance._client is client) @@ -173,7 +174,7 @@ def test_copy(self): client = _Client(self.PROJECT) instance = self._makeOne(self.INSTANCE_ID, client, - display_name=display_name) + display_name=display_name) new_instance = instance.copy() # Make sure the client copy succeeded. 
@@ -335,8 +336,6 @@ def test_reload(self): def test_create(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated_v2 import ( - instance_pb2 as data_v2_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import instance as MUT @@ -452,7 +451,64 @@ def test_delete(self): {}, )]) - def _list_tables_helper(self, table_id, table_name=None): + def test_list_clusters(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as instance_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + FAILED_LOCATION = 'FAILED' + FAILED_LOCATIONS = [FAILED_LOCATION] + CLUSTER_ID1 = 'cluster-id1' + CLUSTER_ID2 = 'cluster-id2' + SERVE_NODES = 4 + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) + CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) + # Create request_pb + request_pb = messages_v2_pb2.ListClustersRequest( + name=instance.name, + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[FAILED_LOCATION], + clusters=[ + instance_v2_pb2.Cluster( + name=CLUSTER_NAME1, + serve_nodes=SERVE_NODES, + ), + instance_v2_pb2.Cluster( + name=CLUSTER_NAME2, + serve_nodes=SERVE_NODES, + ), + ], + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + clusters = [ + instance.cluster(CLUSTER_ID1), + instance.cluster(CLUSTER_ID2), + ] + expected_result = (clusters, FAILED_LOCATIONS) + + # Perform the method and check the result. 
+ result = instance.list_clusters() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListClusters', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def _list_tables_helper(self, table_name=None): from gcloud.bigtable._generated_v2 import ( table_pb2 as table_data_v2_pb2) from gcloud.bigtable._generated_v2 import ( @@ -468,7 +524,7 @@ def _list_tables_helper(self, table_id, table_name=None): # Create response_pb if table_name is None: - table_name = self.INSTANCE_NAME + '/tables/' + self.TABLE_ID + table_name = self.TABLE_NAME response_pb = table_messages_v1_pb2.ListTablesResponse( tables=[ @@ -494,11 +550,11 @@ def _list_tables_helper(self, table_id, table_name=None): )]) def test_list_tables(self): - self._list_tables_helper(self.TABLE_ID) + self._list_tables_helper() def test_list_tables_failure_bad_split(self): with self.assertRaises(ValueError): - self._list_tables_helper(None, table_name='wrong-format') + self._list_tables_helper(table_name='wrong-format') def test_list_tables_failure_name_bad_before(self): BAD_TABLE_NAME = ('nonempty-section-before' + @@ -506,7 +562,7 @@ def test_list_tables_failure_name_bad_before(self): '/instances/' + self.INSTANCE_ID + '/tables/' + self.TABLE_ID) with self.assertRaises(ValueError): - self._list_tables_helper(self.TABLE_ID, table_name=BAD_TABLE_NAME) + self._list_tables_helper(table_name=BAD_TABLE_NAME) class Test__prepare_create_request(unittest2.TestCase): @@ -525,7 +581,6 @@ def test_it(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = u'DISPLAY_NAME' - SERVE_NODES = 8 client = _Client(PROJECT) instance = Instance(INSTANCE_ID, client, display_name=DISPLAY_NAME) @@ -640,7 +695,7 @@ def test_it(self): EXPECTED_OPERATION_ID = 234 OPERATION_NAME = ( 'operations/projects/%s/instances/%s/operations/%d' % - (PROJECT, INSTANCE_ID, EXPECTED_OPERATION_ID)) + (PROJECT, INSTANCE_ID, EXPECTED_OPERATION_ID)) current_op = operations_pb2.Operation(name=OPERATION_NAME) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 1eb2d81d3b10..cd47ada3ba22 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -248,15 +248,9 @@ def _read_row_helper(self, chunks, expected_result): from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import table as MUT - project_id = 'project-id' - instance_id = 'instance-id' - table_id = 'table-id' - timeout_seconds = 596 - client = _Client(timeout_seconds=timeout_seconds) - instance_name = ('projects/' + project_id + - '/instances/' + instance_id) - instance = _Instance(instance_name, client=client) - table = self._makeOne(table_id, instance) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. 
@@ -281,7 +275,7 @@ def mock_create_row_request(table_name, row_key, filter_): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) self.assertEqual(mock_created, @@ -329,15 +323,9 @@ def test_read_rows(self): from gcloud.bigtable.row_data import PartialRowsData from gcloud.bigtable import table as MUT - project_id = 'project-id' - instance_id = 'instance-id' - table_id = 'table-id' - timeout_seconds = 1111 - client = _Client(timeout_seconds=timeout_seconds) - instance_name = ('projects/' + project_id + - '/instances/' + instance_id) - instance = _Instance(instance_name, client=client) - table = self._makeOne(table_id, instance) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. @@ -369,7 +357,7 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) created_kwargs = { @@ -383,21 +371,12 @@ def mock_create_row_request(table_name, **kwargs): def test_sample_row_keys(self): from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - instance_id = 'instance-id' - table_id = 'table-id' - timeout_seconds = 1333 - - client = _Client(timeout_seconds=timeout_seconds) - instance_name = ('projects/' + project_id + - '/instances/' + instance_id) - instance = _Instance(instance_name, client=client) - table = self._makeOne(table_id, instance) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = instance_name + '/tables/' + table_id - request_pb = _SampleRowKeysRequestPB( - table_name=table_name) + request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME) # Create response_iterator response_iterator = object() # Just passed to a mock. @@ -413,7 +392,7 @@ def test_sample_row_keys(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'SampleRowKeys', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) From 9880a42904505a0ae0a501f23e52c57bd88bfabf Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 27 Jun 2016 23:02:18 -0400 Subject: [PATCH 038/103] Adjust docs for V2 instance admin patterns. --- docs/bigtable-client-intro.rst | 8 +- docs/bigtable-cluster-api.rst | 187 --------------------------------- docs/bigtable-instance-api.rst | 133 +++++++++++++++++++++++ docs/bigtable-instance.rst | 12 +++ docs/bigtable-table-api.rst | 10 +- docs/index.rst | 3 +- 6 files changed, 156 insertions(+), 197 deletions(-) delete mode 100644 docs/bigtable-cluster-api.rst create mode 100644 docs/bigtable-instance-api.rst create mode 100644 docs/bigtable-instance.rst diff --git a/docs/bigtable-client-intro.rst b/docs/bigtable-client-intro.rst index 55111ad1dfb5..db04ffa0e0c1 100644 --- a/docs/bigtable-client-intro.rst +++ b/docs/bigtable-client-intro.rst @@ -63,7 +63,7 @@ Configuration Admin API Access ---------------- -If you'll be using your client to make `Cluster Admin`_ and `Table Admin`_ +If you'll be using your client to make `Instance Admin`_ and `Table Admin`_ API requests, you'll need to pass the ``admin`` argument: .. 
code:: python @@ -89,10 +89,10 @@ Next Step --------- After a :class:`Client `, the next highest-level -object is a :class:`Cluster `. You'll need +object is a :class:`Instance `. You'll need one before you can interact with tables or data. -Head next to learn about the :doc:`bigtable-cluster-api`. +Head next to learn about the :doc:`bigtable-instance-api`. -.. _Cluster Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1 +.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 .. _Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/docs/bigtable-cluster-api.rst b/docs/bigtable-cluster-api.rst deleted file mode 100644 index 1266fa8e893a..000000000000 --- a/docs/bigtable-cluster-api.rst +++ /dev/null @@ -1,187 +0,0 @@ -Cluster Admin API -================= - -.. warning:: - - gRPC is required for using the Cloud Bigtable API. As of May 2016, - ``grpcio`` is only supported in Python 2.7, so importing - :mod:`gcloud.bigtable` in other versions of Python will fail. - -After creating a :class:`Client `, you can -interact with individual clusters, groups of clusters or available -zones for a project. - -List Clusters -------------- - -If you want a comprehensive list of all existing clusters, make a -`ListClusters`_ API request with -:meth:`Client.list_clusters() `: - -.. code:: python - - clusters = client.list_clusters() - -List Zones ----------- - -If you aren't sure which ``zone`` to create a cluster in, find out -which zones your project has access to with a `ListZones`_ API request -with :meth:`Client.list_zones() `: - -.. code:: python - - zones = client.list_zones() - -You can choose a :class:`string ` from among the result to pass to -the :class:`Cluster ` constructor. - -The available zones (as of February 2016) are - -.. code:: python - - >>> zones - [u'asia-east1-b', u'europe-west1-c', u'us-central1-c', u'us-central1-b'] - -Cluster Factory ---------------- - -To create a :class:`Cluster ` object: - -.. code:: python - - cluster = client.cluster(zone, cluster_id, - display_name=display_name, - serve_nodes=3) - -Both ``display_name`` and ``serve_nodes`` are optional. When not provided, -``display_name`` defaults to the ``cluster_id`` value and ``serve_nodes`` -defaults to the minimum allowed: -:data:`DEFAULT_SERVE_NODES `. - -Even if this :class:`Cluster ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`Table ` just as the -:class:`Client ` is used as the parent of -a :class:`Cluster `. - -Create a new Cluster --------------------- - -After creating the cluster object, make a `CreateCluster`_ API request -with :meth:`create() `: - -.. code:: python - - cluster.display_name = 'My very own cluster' - cluster.create() - -If you would like more than the minimum number of nodes -(:data:`DEFAULT_SERVE_NODES `) -in your cluster: - -.. code:: python - - cluster.serve_nodes = 10 - cluster.create() - -Check on Current Operation --------------------------- - -.. 
note:: - - When modifying a cluster (via a `CreateCluster`_, `UpdateCluster`_ or - `UndeleteCluster`_ request), the Bigtable API will return a - `long-running operation`_ and a corresponding - :class:`Operation ` object - will be returned by each of - :meth:`create() `, - :meth:`update() ` and - :meth:`undelete() `. - -You can check if a long-running operation (for a -:meth:`create() `, -:meth:`update() ` or -:meth:`undelete() `) has finished -by making a `GetOperation`_ request with -:meth:`Operation.finished() `: - -.. code:: python - - >>> operation = cluster.create() - >>> operation.finished() - True - -.. note:: - - Once an :class:`Operation ` object - has returned :data:`True` from - :meth:`finished() `, the - object should not be re-used. Subsequent calls to - :meth:`finished() ` - will result in a :class:`ValueError `. - -Get metadata for an existing Cluster ------------------------------------- - -After creating the cluster object, make a `GetCluster`_ API request -with :meth:`reload() `: - -.. code:: python - - cluster.reload() - -This will load ``serve_nodes`` and ``display_name`` for the existing -``cluster`` in addition to the ``cluster_id``, ``zone`` and ``project`` -already set on the :class:`Cluster ` object. - -Update an existing Cluster --------------------------- - -After creating the cluster object, make an `UpdateCluster`_ API request -with :meth:`update() `: - -.. code:: python - - client.display_name = 'New display_name' - cluster.update() - -Delete an existing Cluster --------------------------- - -Make a `DeleteCluster`_ API request with -:meth:`delete() `: - -.. code:: python - - cluster.delete() - -Undelete a deleted Cluster --------------------------- - -Make an `UndeleteCluster`_ API request with -:meth:`undelete() `: - -.. code:: python - - cluster.undelete() - -Next Step ---------- - -Now we go down the hierarchy from -:class:`Cluster ` to a -:class:`Table `. - -Head next to learn about the :doc:`bigtable-table-api`. - -.. _Cluster Admin API: https://cloud.google.com/bigtable/docs/creating-cluster -.. _CreateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L66-L68 -.. _GetCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L38-L40 -.. _UpdateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L93-L95 -.. _DeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L109-L111 -.. _ListZones: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L33-L35 -.. _ListClusters: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L44-L46 -.. 
_GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 -.. _UndeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L126-L128 -.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst new file mode 100644 index 000000000000..c2fd1402a97b --- /dev/null +++ b/docs/bigtable-instance-api.rst @@ -0,0 +1,133 @@ +Instance Admin API +================== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +After creating a :class:`Client `, you can +interact with individual instances for a project. + +List Instances +-------------- + +If you want a comprehensive list of all existing instances, make a +`ListInstances`_ API request with +:meth:`Client.list_instances() `: + +.. code:: python + + instances = client.list_instances() + +Instance Factory +---------------- + +To create an :class:`Instance ` object: + +.. code:: python + + instance = client.instance(instance_id, display_name=display_name) + +``display_name`` is optional. When not provided, +``display_name`` defaults to the ``instance_id`` value. + +Even if this :class:`Instance ` already +has been created with the API, you'll want to use this object as the +parent of a :class:`Table ` just as the +:class:`Client ` is used as the parent of +an :class:`Instance `. + +Create a new Instance +--------------------- + +After creating the instance object, make a `CreateInstance`_ API request +with :meth:`create() `: + +.. code:: python + + instance.display_name = 'My very own instance' + instance.create() + +Check on Current Operation +-------------------------- + +.. note:: + + When modifying an instance (via a `CreateInstance`_ request), the Bigtable + API will return a `long-running operation`_ and a corresponding + :class:`Operation ` object + will be returned by + :meth:`create() `. + +You can check if a long-running operation (for a +:meth:`create() ` request) has finished +by making a `GetOperation`_ request with +:meth:`Operation.finished() `: + +.. code:: python + + >>> operation = instance.create() + >>> operation.finished() + True + +.. note:: + + Once an :class:`Operation ` object + has returned :data:`True` from + :meth:`finished() `, the + object should not be re-used. Subsequent calls to + :meth:`finished() ` + will result in a :class:`ValueError `. + +Get metadata for an existing Instance +------------------------------------- + +After creating the instance object, make a `GetInstance`_ API request +with :meth:`reload() `: + +.. code:: python + + instance.reload() + +This will load ``display_name`` for the existing ``instance`` object.
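+ +For example, to re-read an instance's metadata and then inspect its +display name (a minimal sketch combining only the calls shown above): + +.. code:: python + + instance = client.instance(instance_id) + instance.reload() + print(instance.display_name) + +Update an existing Instance +--------------------------- + +After creating the instance object, make an `UpdateInstance`_ API request +with :meth:`update() `: + +.. 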
code:: python + + instance.display_name = 'New display_name' + instance.update() + +Delete an existing Instance +--------------------------- + +Make a `DeleteInstance`_ API request with +:meth:`delete() `: + +.. code:: python + + instance.delete() + +Next Step +--------- + +Now we go down the hierarchy from +:class:`Instance ` to a +:class:`Table `. + +Head next to learn about the :doc:`bigtable-table-api`. + +.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance +.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 +.. _GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 +.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 +.. _DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 +.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 +.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 +.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/docs/bigtable-instance.rst b/docs/bigtable-instance.rst new file mode 100644 index 000000000000..7ba1c15d8df3 --- /dev/null +++ b/docs/bigtable-instance.rst @@ -0,0 +1,12 @@ +Instance +~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.instance + :members: + :show-inheritance: diff --git a/docs/bigtable-table-api.rst b/docs/bigtable-table-api.rst index 78ac3c6f079a..554b157031f9 100644 --- a/docs/bigtable-table-api.rst +++ b/docs/bigtable-table-api.rst @@ -7,20 +7,20 @@ Table Admin API ``grpcio`` is only supported in Python 2.7, so importing :mod:`gcloud.bigtable` in other versions of Python will fail. -After creating a :class:`Cluster `, you can +After creating an :class:`Instance `, you can interact with individual tables, groups of tables or column families within a table. List Tables ----------- -If you want a comprehensive list of all existing tables in a cluster, make a +If you want a comprehensive list of all existing tables in an instance, make a `ListTables`_ API request with -:meth:`Cluster.list_tables() `: +:meth:`Instance.list_tables() `: .. code:: python - >>> cluster.list_tables() + >>> instance.list_tables() [, ] @@ -31,7 +31,7 @@ To create a :class:`Table ` object: .. 
code:: python - table = cluster.table(table_id) + table = instance.table(table_id) Even if this :class:`Table ` already has been created with the API, you'll want this object to use as a diff --git a/docs/index.rst b/docs/index.rst index df0aa0ea9980..b263dba70531 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -63,10 +63,11 @@ bigtable-usage HappyBase bigtable-client-intro - bigtable-cluster-api + bigtable-instance-api bigtable-table-api bigtable-data-api Client + bigtable-instance bigtable-cluster bigtable-table bigtable-column-family From aa954423fbd4a9ffa2fe0b902298a3fc46e1f7fd Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 00:14:38 -0400 Subject: [PATCH 039/103] Use 'grpcio-tools' in a virtualenv to generate latest Bigtable V2 protos. Closes #1928. This is a better hack for #1482, but we still really want #1384. --- .gitignore | 1 + Makefile.bigtable_v2 | 43 +- gcloud/bigtable/_generated_v2/_bigtable.proto | 82 +-- .../_bigtable_instance_admin.proto | 125 ++-- .../_generated_v2/_bigtable_table_admin.proto | 34 +- gcloud/bigtable/_generated_v2/_common.proto | 4 + gcloud/bigtable/_generated_v2/_data.proto | 153 ++--- gcloud/bigtable/_generated_v2/_instance.proto | 39 +- gcloud/bigtable/_generated_v2/_table.proto | 52 +- .../bigtable_instance_admin_pb2.py | 535 +++++++++++++----- gcloud/bigtable/_generated_v2/bigtable_pb2.py | 290 ++++++++-- .../_generated_v2/bigtable_table_admin_pb2.py | 311 +++++++--- .../_generated_v2/operations_grpc_pb2.py | 256 +++++++++ gcloud/bigtable/_generated_v2/table_pb2.py | 182 +----- scripts/make_operations_grpc.py | 18 +- scripts/rewrite_imports.py | 4 + 16 files changed, 1413 insertions(+), 716 deletions(-) diff --git a/.gitignore b/.gitignore index feb24cb93c97..a84250e78819 100644 --- a/.gitignore +++ b/.gitignore @@ -57,3 +57,4 @@ scripts/pylintrc_reduced generated_python/ cloud-bigtable-client/ googleapis-pb/ +grpc_python_venv/ diff --git a/Makefile.bigtable_v2 b/Makefile.bigtable_v2 index 03962bdd127d..1c9bdae13930 100644 --- a/Makefile.bigtable_v2 +++ b/Makefile.bigtable_v2 @@ -1,10 +1,8 @@ +GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv GENERATED_DIR=$(shell pwd)/generated_python GENERATED_SUBDIR=_generated_v2 BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) -GRPC_PLUGIN=grpc_python_plugin -PROTOC_CMD=protoc -BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client -BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-protos/src/main/proto +PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb help: @@ -15,31 +13,30 @@ help: @echo ' make clean Clean generated files ' generate: + # Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools + [ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV) + $(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools # Retrieve git repos that have our *.proto files. - [ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 - cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 cd googleapis-pb && git pull origin master # Make the directory where our *_pb2.py files will go. mkdir -p $(GENERATED_DIR) # Generate all *_pb2.py files that require gRPC. 
$(PROTOC_CMD) \ - --proto_path=$(BIGTABLE_PROTOS_DIR) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ --python_out=$(GENERATED_DIR) \ - --plugin=protoc-gen-grpc=$(GRPC_PLUGIN) \ - --grpc_out=$(GENERATED_DIR) \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/bigtable.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_instance_admin.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_table_admin.proto + --grpc_python_out=$(GENERATED_DIR) \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/bigtable.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_instance_admin.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_table_admin.proto # Generate all *_pb2.py files that do not require gRPC. $(PROTOC_CMD) \ - --proto_path=$(BIGTABLE_PROTOS_DIR) \ --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ --python_out=$(GENERATED_DIR) \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/data.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/common.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/instance.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/table.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/data.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/common.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/instance.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/table.proto \ # Move the newly generated *_pb2.py files into our library. cp $(GENERATED_DIR)/google/bigtable/v2/* $(BIGTABLE_DIR) cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR) @@ -47,9 +44,9 @@ generate: # Remove all existing *.proto files before we replace rm -f $(BIGTABLE_DIR)/*.proto # Copy over the *.proto files into our library. - cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/*.proto $(BIGTABLE_DIR) - cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/*.proto $(BIGTABLE_DIR) - cp $(BIGTABLE_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/*.proto $(BIGTABLE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/*.proto $(BIGTABLE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) # Rename all *.proto files in our library with an # underscore and remove executable bit. cd $(BIGTABLE_DIR) && \ @@ -60,8 +57,8 @@ generate: # Separate the gRPC parts of the operations service from the # non-gRPC parts so that the protos from `googleapis-common-protos` # can be used without gRPC. - PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ - GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ + GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ python scripts/make_operations_grpc.py # Rewrite the imports in the generated *_pb2.py files. python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py @@ -70,6 +67,6 @@ check_generate: python scripts/check_generate.py clean: - rm -fr $(BIGTABLE_CHECKOUT_DIR) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) + rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) .PHONY: generate check_generate clean diff --git a/gcloud/bigtable/_generated_v2/_bigtable.proto b/gcloud/bigtable/_generated_v2/_bigtable.proto index 900168773363..49e27ca2ff5f 100644 --- a/gcloud/bigtable/_generated_v2/_bigtable.proto +++ b/gcloud/bigtable/_generated_v2/_bigtable.proto @@ -27,18 +27,12 @@ option java_package = "com.google.bigtable.v2"; // Service for reading from and writing to existing Bigtable tables. -// -// Caution: This service is experimental. 
The details can change and the rpcs -// may or may not be active. service Bigtable { // Streams back the contents of all requested rows, optionally // applying the same Reader filter to each. Depending on their size, // rows and cells may be broken up across multiple responses, but // atomicity of each row will still be preserved. See the // ReadRowsResponse documentation for details. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" }; } @@ -47,18 +41,12 @@ service Bigtable { // delimit contiguous sections of the table of approximately equal size, // which can be used to break up the data for distributed tasks like // mapreduces. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; } // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. + // unchanged unless explicitly changed by `mutation`. rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" }; } @@ -66,28 +54,20 @@ service Bigtable { // Mutates multiple rows in a batch. Each individual row is mutated // atomically as in MutateRow, but the entire batch is not executed // atomically. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" }; } // Mutates a row atomically based on the output of a predicate Reader filter. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" }; } - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. + // Modifies a row atomically. The method reads the latest existing timestamp + // and value from the specified columns and writes a new entry based on + // pre-defined read/modify/write rules. The new value for the timestamp is the + // greater of the existing timestamp or the current server time. The method + // returns the new contents of all modified cells. 
rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" }; } @@ -97,7 +77,7 @@ service Bigtable { message ReadRowsRequest { // The unique name of the table from which to read. // Values are of the form - // projects//instances//tables/
+ // projects/<project>/instances/<instance>/tables/<table> string table_name = 1; // The row keys and/or ranges to read. If not specified, reads from all rows. @@ -128,22 +108,22 @@ message ReadRowsResponse { // family as the previous CellChunk. The empty string can occur as a // column family name in a response so clients must check // explicitly for the presence of this message, not just for - // family_name.value being non-empty. + // `family_name.value` being non-empty. google.protobuf.StringValue family_name = 2; // The column qualifier for this chunk of data. If this message // is not present, this CellChunk is a continuation of the same column // as the previous CellChunk. Column qualifiers may be empty so // clients must check for the presence of this message, not just - // for qualifier.value being non-empty. + // for `qualifier.value` being non-empty. google.protobuf.BytesValue qualifier = 3; // The cell's stored timestamp, which also uniquely identifies it // within its column. Values are always expressed in // microseconds, but individual tables may set a coarser - // "granularity" to further restrict the allowed values. For + // granularity to further restrict the allowed values. For // example, a table which specifies millisecond granularity will - // only allow values of "timestamp_micros" which are multiples of + // only allow values of `timestamp_micros` which are multiples of // 1000. Timestamps are only set in the first CellChunk per cell // (for cells split into multiple chunks). int64 timestamp_micros = 4; @@ -168,11 +148,11 @@ message ReadRowsResponse { oneof row_status { // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. + // `row_key`, as it will be re-read from the beginning. bool reset_row = 8; // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. + // `row_key`, as its data has been fully read. bool commit_row = 9; } } @@ -193,7 +173,7 @@ message ReadRowsResponse { message SampleRowKeysRequest { // The unique name of the table from which to sample row keys. // Values are of the form - // projects//instances//tables/
+ // projects/<project>/instances/<instance>/tables/<table> string table_name = 1; } @@ -209,9 +189,9 @@ message SampleRowKeysResponse { bytes row_key = 1; // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent + // `row_key`. Buffering the contents of all rows between two subsequent // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. + // `offset_bytes` fields. int64 offset_bytes = 2; } @@ -219,7 +199,7 @@ message SampleRowKeysResponse { message MutateRowRequest { // The unique name of the table to which the mutation should be applied. // Values are of the form - // projects//instances//tables/
+ // projects/<project>/instances/<instance>/tables/<table> string table_name = 1; // The key of the row to which the mutation should be applied. @@ -245,17 +225,17 @@ message MutateRowsRequest { // Changes to be atomically applied to the specified row. Mutations are // applied in order, meaning that earlier mutations can be masked by // later ones. - // At least one mutation must be specified. + // You must specify at least one mutation. repeated Mutation mutations = 2; } // The unique name of the table to which the mutations should be applied. string table_name = 1; - // The row keys/mutations to be applied in bulk. + // The row keys and corresponding mutations to be applied in bulk. // Each entry is applied as an atomic mutation, but the entries may be // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries may + // At least one entry must be specified, and in total the entries can // contain at most 100000 mutations. repeated Entry entries = 2; } @@ -283,36 +263,36 @@ message CheckAndMutateRowRequest { // The unique name of the table to which the conditional mutation should be // applied. // Values are of the form - // projects//instances//tables/
+ // projects/<project>/instances/<instance>/tables/<table> string table_name = 1; // The key of the row to which the conditional mutation should be applied. bytes row_key = 2; // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains + // on whether or not any results are yielded, either `true_mutations` or + // `false_mutations` will be executed. If unset, checks that the row contains // any values at all. RowFilter predicate_filter = 6; - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in + // Changes to be atomically applied to the specified row if `predicate_filter` + // yields at least one cell when applied to `row_key`. Entries are applied in // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most + // Must contain at least one entry if `false_mutations` is empty, and at most // 100000. repeated Mutation true_mutations = 4; - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in + // Changes to be atomically applied to the specified row if `predicate_filter` + // does not yield any cells when applied to `row_key`. Entries are applied in // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most + // Must contain at least one entry if `true_mutations` is empty, and at most // 100000. repeated Mutation false_mutations = 5; } // Response message for Bigtable.CheckAndMutateRow. message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for + // Whether or not the request's `predicate_filter` yielded any results for // the specified row. bool predicate_matched = 1; } @@ -322,7 +302,7 @@ message ReadModifyWriteRowRequest { // The unique name of the table to which the read/modify/write rules should be // applied. // Values are of the form - // projects//instances//tables/
+ // projects/<project>/instances/<instance>/tables/<table> string table_name = 1; // The key of the row to which the read/modify/write rules should be applied. diff --git a/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto b/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto index c27d266a8e62..bda5d2163532 100644 --- a/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto +++ b/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto @@ -17,7 +17,6 @@ syntax = "proto3"; package google.bigtable.admin.v2; import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/common.proto"; import "google/bigtable/admin/v2/instance.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/empty.proto"; @@ -31,86 +30,53 @@ option java_package = "com.google.bigtable.admin.v2"; // Service for creating, configuring, and deleting Cloud Bigtable Instances and // Clusters. Provides access to the Instance and Cluster schemas only, not the // tables metadata or data stored in those tables. -// -// Caution: This service is experimental. The details can change and the rpcs -// may or may not be active. service BigtableInstanceAdmin { // Create an instance within a project. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v2/{name=projects/*}/instances" body: "*" }; + option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" }; } // Gets information about an instance. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc GetInstance(GetInstanceRequest) returns (Instance) { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" }; } // Lists information about instances in a project. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { get: "/v2/{name=projects/*}/instances" }; + option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" }; } // Updates an instance within a project. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc UpdateInstance(Instance) returns (Instance) { option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" }; } // Delete an instance from a project. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" }; } // Creates a cluster within an instance. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v2/{name=projects/*/instances/*}/clusters" body: "cluster" }; + option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" }; } // Gets information about a cluster. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. 
rpc GetCluster(GetClusterRequest) returns (Cluster) { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" }; } // Lists information about clusters in an instance. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}/clusters" }; + option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" }; } // Updates a cluster within an instance. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" }; } // Deletes a cluster from an instance. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" }; } @@ -118,23 +84,48 @@ service BigtableInstanceAdmin { // Request message for BigtableInstanceAdmin.CreateInstance. message CreateInstanceRequest { - string name = 1; + // The unique name of the project in which to create the new instance. + // Values are of the form projects/ + string parent = 1; + // The id to be used when referring to the new instance within its project, + // e.g. just the "myinstance" section of the full name + // "projects/myproject/instances/myinstance" string instance_id = 2; + // The instance to create. + // Fields marked "@OutputOnly" must be left blank. Instance instance = 3; + // The clusters to be created within the instance, mapped by desired + // cluster ID (e.g. just the "mycluster" part of the full name + // "projects/myproject/instances/myinstance/clusters/mycluster"). + // Fields marked "@OutputOnly" must be left blank. + // Currently exactly one cluster must be specified. map clusters = 4; } // Request message for BigtableInstanceAdmin.GetInstance. message GetInstanceRequest { + // The unique name of the requested instance. Values are of the form + // projects//instances/ string name = 1; } // Request message for BigtableInstanceAdmin.ListInstances. message ListInstancesRequest { - string name = 1; + // The unique name of the project for which a list of instances is requested. + // Values are of the form projects/ + string parent = 1; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListInstances. +message ListInstancesResponse { + // The list of requested instances. + repeated Instance instances = 1; // Locations from which Instance information could not be retrieved, // due to an outage or some other transient condition. @@ -143,47 +134,58 @@ message ListInstancesRequest { // Cluster in a failed location may only have partial information returned. repeated string failed_locations = 2; - string page_token = 3; -} - -// Response message for BigtableInstanceAdmin.ListInstances. -message ListInstancesResponse { - repeated Instance instances = 1; - - string next_page_token = 2; + // Set if not all instances could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. 
+ string next_page_token = 3; } // Request message for BigtableInstanceAdmin.DeleteInstance. message DeleteInstanceRequest { + // The unique name of the instance to be deleted. + // Values are of the form projects//instances/ string name = 1; } // Request message for BigtableInstanceAdmin.CreateCluster. message CreateClusterRequest { - string name = 1; - + // The unique name of the instance in which to create the new cluster. + // Values are of the form + // projects//instances//clusters/[a-z][-a-z0-9]* + string parent = 1; + + // The id to be used when referring to the new cluster within its instance, + // e.g. just the "mycluster" section of the full name + // "projects/myproject/instances/myinstance/clusters/mycluster" string cluster_id = 2; + // The cluster to be created. + // Fields marked "@OutputOnly" must be left blank. Cluster cluster = 3; } // Request message for BigtableInstanceAdmin.GetCluster. message GetClusterRequest { + // The unique name of the requested cluster. Values are of the form + // projects//instances//clusters/ string name = 1; } // Request message for BigtableInstanceAdmin.ListClusters. message ListClustersRequest { - // Values are of the form projects//instances/ - // Use = '-' to list Clusters for all Instances in a project, + // The unique name of the instance for which a list of clusters is requested. + // Values are of the form projects//instances/ + // Use = '-' to list Clusters for all Instances in a project, // for example "projects/myproject/instances/-" - string name = 1; + string parent = 1; + // The value of `next_page_token` returned by a previous call. string page_token = 2; } // Response message for BigtableInstanceAdmin.ListClusters. message ListClustersResponse { + // The list of requested clusters. repeated Cluster clusters = 1; // Locations from which Cluster information could not be retrieved, @@ -192,11 +194,16 @@ message ListClustersResponse { // or may only have partial information returned. repeated string failed_locations = 2; + // Set if not all clusters could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. string next_page_token = 3; } // Request message for BigtableInstanceAdmin.DeleteCluster. message DeleteClusterRequest { + // The unique name of the cluster to be deleted. Values are of the form + // projects//instances//clusters/ string name = 1; } @@ -211,3 +218,15 @@ message CreateInstanceMetadata { // The time at which the operation failed or was completed successfully. google.protobuf.Timestamp finish_time = 3; } + +// The metadata for the Operation returned by UpdateCluster. +message UpdateClusterMetadata { + // The request that prompted the initiation of this UpdateCluster operation. + Cluster original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto b/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto index 8654a2a92d57..0a39e298359c 100644 --- a/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto +++ b/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto @@ -28,49 +28,31 @@ option java_package = "com.google.bigtable.admin.v2"; // Service for creating, configuring, and deleting Cloud Bigtable tables. 
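The renamed `parent` fields and the `page_token` / `next_page_token` pair documented on the instance-admin messages above follow the standard list-pagination pattern. A minimal Python sketch of that loop, assuming an already-configured gRPC `channel` (endpoint and credentials are outside this patch) and the generated `bigtable_instance_admin_pb2` module; only the message and field names come from the proto shown above:

from gcloud.bigtable._generated_v2 import bigtable_instance_admin_pb2 as admin_v2

def list_all_clusters(channel, project_id):
    """Collect every Cluster across all instances of ``project_id``."""
    # '-' as the instance ID lists clusters for every instance in the project.
    parent = 'projects/%s/instances/-' % (project_id,)
    stub = admin_v2.BigtableInstanceAdminStub(channel)
    clusters, page_token = [], ''
    while True:
        request = admin_v2.ListClustersRequest(parent=parent,
                                               page_token=page_token)
        response = stub.ListClusters(request)
        clusters.extend(response.clusters)
        if response.failed_locations:
            # Some zones were unreachable; the listing may be incomplete.
            print('Could not reach: %s' % (list(response.failed_locations),))
        page_token = response.next_page_token
        if not page_token:  # an empty token means this was the last page
            return clusters

An empty `next_page_token` marks the final page, matching the comment added to `ListClustersResponse` above.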
// Provides access to the table schemas only, not the data stored within // the tables. -// -// Caution: This service is experimental. The details can change and the rpcs -// may or may not be active. service BigtableTableAdmin { // Creates a new table in the specified instance. // The table can be created with a full set of initial column families, // specified in the request. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v2/{name=projects/*/instances/*}/tables" body: "*" }; + option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" }; } // Lists all tables served from a specified instance. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}/tables" }; + option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" }; } // Gets metadata information about the specified table. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc GetTable(GetTableRequest) returns (Table) { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" }; } // Permanently deletes a specified table and all of its data. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" }; } // Atomically performs a series of column family modifications // on the specified table. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" }; } @@ -78,9 +60,6 @@ service BigtableTableAdmin { // Permanently drop/delete a row range from a specified table. The request can // specify whether to delete all rows in a table, or only those that match a // particular prefix. - // - // Caution: This rpc is experimental. The details can change and the rpc - // may or may not be active. rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" }; } @@ -96,7 +75,7 @@ message CreateTableRequest { // The unique name of the instance in which to create the table. // Values are of the form projects//instances/ - string name = 1; + string parent = 1; // The name by which the new table should be referred to within the parent // instance, e.g. "foobar" rather than "/tables/foobar". @@ -143,13 +122,13 @@ message DropRowRangeRequest { message ListTablesRequest { // The unique name of the instance for which tables should be listed. // Values are of the form projects//instances/ - string name = 1; + string parent = 1; // The view to be applied to the returned tables' fields. // Defaults to NAME_ONLY if unspecified (no others are currently supported). Table.View view = 2; - // Not yet supported. + // The value of `next_page_token` returned by a previous call. 
string page_token = 3; } @@ -158,6 +137,9 @@ message ListTablesResponse { // The tables present in the requested cluster. repeated Table tables = 1; + // Set if not all tables could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. string next_page_token = 2; } diff --git a/gcloud/bigtable/_generated_v2/_common.proto b/gcloud/bigtable/_generated_v2/_common.proto index 17cd4ffe19e8..1912e03e0446 100644 --- a/gcloud/bigtable/_generated_v2/_common.proto +++ b/gcloud/bigtable/_generated_v2/_common.proto @@ -24,10 +24,14 @@ option java_outer_classname = "CommonProto"; option java_package = "com.google.bigtable.admin.v2"; +// Storage media types for persisting Bigtable data. enum StorageType { + // The user did not specify a storage type. STORAGE_TYPE_UNSPECIFIED = 0; + // Flash (SSD) storage should be used. SSD = 1; + // Magnetic drive (HDD) storage should be used. HDD = 2; } diff --git a/gcloud/bigtable/_generated_v2/_data.proto b/gcloud/bigtable/_generated_v2/_data.proto index aab5e9972603..720f48279b8f 100644 --- a/gcloud/bigtable/_generated_v2/_data.proto +++ b/gcloud/bigtable/_generated_v2/_data.proto @@ -34,12 +34,13 @@ message Row { repeated Family families = 2; } -// Specifies (some of) the contents of a single row/column family of a table. +// Specifies (some of) the contents of a single row/column family intersection +// of a table. message Family { // The unique key which identifies this family within its row. This is the // same key that's used to identify the family in, for example, a RowFilter // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may + // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may // produce cells in a sentinel family with an empty name. // Must be no greater than 64 characters in length. string name = 1; @@ -48,11 +49,12 @@ message Family { repeated Column columns = 2; } -// Specifies (some of) the contents of a single row/column of a table. +// Specifies (some of) the contents of a single row/column intersection of a +// table. message Column { // The unique key which identifies this column within its family. This is the // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. + // which sets its `column_qualifier_regex_filter` field. // May contain any byte string, including the empty string, up to 16kiB in // length. bytes qualifier = 1; @@ -66,9 +68,9 @@ message Cell { // The cell's stored timestamp, which also uniquely identifies it within // its column. // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For + // a coarser granularity to further restrict the allowed values. For // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. + // values of `timestamp_micros` which are multiples of 1000. int64 timestamp_micros = 1; // The value stored in the cell. @@ -76,7 +78,7 @@ message Cell { // length. bytes value = 2; - // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. + // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. repeated string labels = 3; } @@ -113,14 +115,14 @@ message RowSet { } // Specifies a contiguous range of columns within a single column family. 
-// The range spans from : to -// :, where both bounds can be either inclusive or -// exclusive. +// The range spans from <column_family>:<start_qualifier> to +// <column_family>:<end_qualifier>, where both bounds can be either +// inclusive or exclusive. message ColumnRange { // The name of the column family within which this range falls. string family_name = 1; - // The column qualifier at which to start the range (within 'column_family'). + // The column qualifier at which to start the range (within `column_family`). // If neither field is set, interpreted as the empty string, inclusive. oneof start_qualifier { // Used when giving an inclusive lower bound for the range. @@ -130,7 +132,7 @@ message ColumnRange { bytes start_qualifier_open = 3; } - // The column qualifier at which to end the range (within 'column_family'). + // The column qualifier at which to end the range (within `column_family`). // If neither field is set, interpreted as the infinite string, exclusive. oneof end_qualifier { // Used when giving an inclusive upper bound for the range. @@ -186,18 +188,18 @@ message ValueRange { // (chains and interleaves). They work as follows: // // * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", +// from the output row. An example of a true filter is the `value_regex_filter`, // which excludes cells whose values don't match the specified pattern. All // regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) // in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. +// important point to keep in mind is that `RE2(.)` is equivalent by default to +// `RE2([^\n])`, meaning that it does not match newlines. When attempting to +// match an arbitrary byte, you should therefore use the escape sequence `\C`, +// which may need to be further escaped as `\\C` in your client language. // // * Transformers alter the input row by changing the values of some of its // cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every +// supported transformer is the `strip_value_transformer`, which replaces every // cell's value with the empty string. // // * Chains and interleaves are described in more detail in the @@ -224,23 +226,24 @@ message RowFilter { // they will all appear in the output row in an unspecified mutual order. 
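The regex semantics spelled out above (RE2 in raw-byte Latin1 mode, `.` never matching `\n`, the `\C` escape) apply to every regex-based true filter. A small sketch of such filters built from the generated data messages; the `data_pb2` module name and the `strip_value_transformer` field come from the full `_data.proto` rather than the hunk shown here, so treat them as assumptions:

from gcloud.bigtable._generated_v2 import data_pb2 as data

# Keep only cells whose value begins with "temp:" (regexes run over raw bytes).
value_filter = data.RowFilter(value_regex_filter=b'temp:.*')

# Row keys may contain arbitrary bytes; '.' will not cross '\n', so the RE2
# escape \C (spelled '\\C' inside a Python byte string) is the true wildcard.
key_filter = data.RowFilter(row_key_regex_filter=b'user#\\C*')

# The one supported transformer: keep the cells but blank out their values.
blanked = data.RowFilter(strip_value_transformer=True)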
// Consider the following example, with three filters: // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // // All interleaved filters are executed atomically. repeated RowFilter filters = 1; } @@ -253,15 +256,15 @@ message RowFilter { // results. Additionally, Condition filters have poor performance, especially // when filters are set for the false condition. message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. + // If `predicate_filter` outputs any cells, then `true_filter` will be + // evaluated on the input row. Otherwise, `false_filter` will be evaluated. RowFilter predicate_filter = 1; - // The filter to apply to the input row if "predicate_filter" returns any + // The filter to apply to the input row if `predicate_filter` returns any // results. If not provided, no results will be returned in the true case. RowFilter true_filter = 2; - // The filter to apply to the input row if "predicate_filter" does not + // The filter to apply to the input row if `predicate_filter` does not // return any results. If not provided, no results will be returned in the // false case. RowFilter false_filter = 3; @@ -287,14 +290,14 @@ message RowFilter { // the output of the read rather than to any parent filter. Consider the // following example: // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) // // A,A,1,w // A,B,2,x @@ -332,7 +335,7 @@ message RowFilter { // Despite being excluded by the qualifier filter, a copy of every cell // that reaches the sink is present in the final result. // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], + // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], // duplicate cells are possible, and appear in an unspecified mutual order. // In this case we have a duplicate with column "A:B" and timestamp 2, // because one copy passed through the all filter while the other was @@ -340,7 +343,7 @@ message RowFilter { // while the other does not. // // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. + // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. bool sink = 16; // Matches all cells, regardless of input. 
Functionally equivalent to @@ -354,9 +357,9 @@ message RowFilter { // Matches only cells from rows whose keys satisfy the given RE2 regex. In // other words, passes through the entire row when the key matches, and // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a + // Note that, since row keys can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a // binary key. bytes row_key_regex_filter = 4; @@ -365,18 +368,18 @@ message RowFilter { double row_sample_filter = 14; // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the ':' + // regex. For technical reasons, the regex must not contain the `:` // character, even if it is not being used as a literal. // Note that, since column families cannot contain the new line character - // '\n', it is sufficient to use '.' as a full wildcard when matching + // `\n`, it is sufficient to use `.` as a full wildcard when matching // column family names. string family_name_regex_filter = 5; // Matches only cells from columns whose qualifiers satisfy the given RE2 // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the '\C' - // escape sequence must be used if a true wildcard is desired. The '.' - // character will not match the new line character '\n', which may be + // Note that, since column qualifiers can contain arbitrary bytes, the `\C` + // escape sequence must be used if a true wildcard is desired. The `.` + // character will not match the new line character `\n`, which may be // present in a binary qualifier. bytes column_qualifier_regex_filter = 6; @@ -387,9 +390,9 @@ message RowFilter { TimestampRange timestamp_range_filter = 8; // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a + // Note that, since cell values can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a // binary value. bytes value_regex_filter = 9; @@ -407,9 +410,9 @@ message RowFilter { int32 cells_per_row_limit_filter = 11; // Matches only the most recent N cells within each column. For example, - // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, - // skip all earlier cells in "foo:bar", and then begin matching again in - // column "foo:bar2". + // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, + // skip all earlier cells in `foo:bar`, and then begin matching again in + // column `foo:bar2`. // If duplicate cells are present, as is possible when using an Interleave, // each copy of the cell is counted separately. int32 cells_per_column_limit_filter = 12; @@ -422,14 +425,14 @@ message RowFilter { // the filter. 
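The per-family, per-qualifier, and per-column-limit filters documented above are usually combined with a Chain so that each filter narrows the previous one's output. A sketch of the common "latest cell of one column" read; the `chain` oneof field and `RowFilter.Chain.filters` belong to the full RowFilter definition rather than the lines shown in this hunk, so they are assumptions here:

from gcloud.bigtable._generated_v2 import data_pb2 as data

latest_greeting = data.RowFilter(chain=data.RowFilter.Chain(filters=[
    # ':' may not appear in a family regex, even as a literal.
    data.RowFilter(family_name_regex_filter='cf1'),
    data.RowFilter(column_qualifier_regex_filter=b'greeting'),
    # Keep only the most recent cell in each column that survived so far.
    data.RowFilter(cells_per_column_limit_filter=1),
]))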
// // Values must be at most 15 characters in length, and match the RE2 - // pattern [a-z0-9\\-]+ + // pattern `[a-z0-9\\-]+` // // Due to a technical limitation, it is not currently possible to apply // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a apply_label_transformer. It is okay for - // an Interleave to contain multiple apply_label_transformers, as they will - // be applied to separate copies of the input. This may be relaxed in the - // future. + // one sub-filter which contains a `apply_label_transformer`. It is okay for + // an Interleave to contain multiple `apply_label_transformers`, as they + // will be applied to separate copies of the input. This may be relaxed in + // the future. string apply_label_transformer = 19; } } @@ -439,7 +442,7 @@ message Mutation { // A Mutation which sets the value of the specified cell. message SetCell { // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ + // Must match `[-_.a-zA-Z0-9]+` string family_name = 1; // The qualifier of the column into which new data should be written. @@ -450,7 +453,7 @@ message Mutation { // Use -1 for current Bigtable server time. // Otherwise, the client should set this value itself, noting that the // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). + // Values must match the granularity of the table (e.g. micros, millis). int64 timestamp_micros = 3; // The value to be written into the specified cell. @@ -461,7 +464,7 @@ message Mutation { // restricting the deletions to a given timestamp range. message DeleteFromColumn { // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ + // Must match `[-_.a-zA-Z0-9]+` string family_name = 1; // The qualifier of the column from which cells should be deleted. @@ -475,7 +478,7 @@ message Mutation { // A Mutation which deletes all cells from the specified column family. message DeleteFromFamily { // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ + // Must match `[-_.a-zA-Z0-9]+` string family_name = 1; } @@ -504,7 +507,7 @@ message Mutation { // specified column. message ReadModifyWriteRule { // The name of the family to which the read/modify/write should be applied. - // Must match [-_.a-zA-Z0-9]+ + // Must match `[-_.a-zA-Z0-9]+` string family_name = 1; // The qualifier of the column to which the read/modify/write should be @@ -515,12 +518,12 @@ message ReadModifyWriteRule { // The rule used to determine the column's new latest value from its current // latest value. oneof rule { - // Rule specifying that "append_value" be appended to the existing value. + // Rule specifying that `append_value` be appended to the existing value. // If the targeted cell is unset, it will be treated as containing the // empty string. bytes append_value = 3; - // Rule specifying that "increment_amount" be added to the existing value. + // Rule specifying that `increment_amount` be added to the existing value. // If the targeted cell is unset, it will be treated as containing a zero. // Otherwise, the targeted cell must contain an 8-byte value (interpreted // as a 64-bit big-endian signed integer), or the entire request will fail. 
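Taken together with the CheckAndMutateRowRequest fields earlier in this patch, the mutations above cover the conditional-write path end to end. A sketch of that flow; the `bigtable_pb2` / `data_pb2` module names and the `set_cell` oneof field name are assumptions inferred from the file layout, while the request field names come straight from the protos:

from gcloud.bigtable._generated_v2 import bigtable_pb2 as messages
from gcloud.bigtable._generated_v2 import data_pb2 as data

TABLE = 'projects/myproject/instances/myinstance/tables/mytable'

# Write cf1:greeting only if the row already has at least one cell in cf1.
predicate = data.RowFilter(family_name_regex_filter='cf1')
set_cell = data.Mutation(set_cell=data.Mutation.SetCell(
    family_name='cf1',
    column_qualifier=b'greeting',
    timestamp_micros=-1,          # -1 lets the server assign the timestamp
    value=b'hello world',
))
request = messages.CheckAndMutateRowRequest(
    table_name=TABLE,
    row_key=b'user#1234',
    predicate_filter=predicate,
    true_mutations=[set_cell],    # executed only when the predicate matches
)
# response = data_stub.CheckAndMutateRow(request)
# response.predicate_matched reports which mutation list was applied.

For ReadModifyWriteRule, an `increment_amount` rule requires any existing cell to hold exactly eight bytes interpretable as a big-endian signed 64-bit integer (for example, the output of struct.pack('>q', value)); otherwise the whole request fails, as the comment above notes.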
diff --git a/gcloud/bigtable/_generated_v2/_instance.proto b/gcloud/bigtable/_generated_v2/_instance.proto index eb324ea9a02c..4aa3f9d06dd3 100644 --- a/gcloud/bigtable/_generated_v2/_instance.proto +++ b/gcloud/bigtable/_generated_v2/_instance.proto @@ -24,10 +24,18 @@ option java_outer_classname = "InstanceProto"; option java_package = "com.google.bigtable.admin.v2"; +// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +// the resources that serve them. +// All tables in an instance are served from a single +// [Cluster][google.bigtable.admin.v2.Cluster]. message Instance { + // Possible states of an instance. enum State { + // The state of the instance could not be determined. STATE_NOT_KNOWN = 0; + // The instance has been successfully created and can serve requests + // to its tables. READY = 1; // The instance is currently being created, and may be destroyed @@ -36,43 +44,70 @@ message Instance { } // @OutputOnly + // The unique name of the instance. Values are of the form + // projects//instances/[a-z][a-z0-9\\-]+[a-z0-9] string name = 1; + // The descriptive name for this instance as it appears in UIs. + // Can be changed at any time, but should be kept globally unique + // to avoid confusion. string display_name = 2; - // @OutputOnly + // + // The current state of the instance. State state = 3; } +// A resizable group of nodes in a particular cloud location, capable +// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent +// [Instance][google.bigtable.admin.v2.Instance]. message Cluster { + // Possible states of a cluster. enum State { + // The state of the cluster could not be determined. STATE_NOT_KNOWN = 0; + // The cluster has been successfully created and is ready to serve requests. READY = 1; // The cluster is currently being created, and may be destroyed // if the creation process encounters an error. + // A cluster may not be able to serve requests while being created. CREATING = 2; + // The cluster is currently being resized, and may revert to its previous + // node count if the process encounters an error. + // A cluster is still capable of serving requests while being resized, + // but may exhibit performance as if its number of allocated nodes is + // between the starting and requested states. RESIZING = 3; - // The cluster has no backing nodes. The data (tables) still + // The cluster has no backing nodes. The data (tables) still // exist, but no operations can be performed on the cluster. DISABLED = 4; } // @OutputOnly + // The unique name of the cluster. Values are of the form + // projects//instances//clusters/[a-z][-a-z0-9]* string name = 1; // @CreationOnly + // The location where this cluster's nodes and storage reside. For best + // performance, clients should be located as close as possible to this cluster. // Currently only zones are supported, e.g. projects/*/locations/us-central1-b string location = 2; // @OutputOnly + // The current state of the cluster. State state = 3; + // The number of nodes allocated to this cluster. More nodes enable higher + // throughput and more consistent performance. int32 serve_nodes = 4; // @CreationOnly + // The type of storage used by this cluster to serve its + // parent instance's tables, unless explicitly overridden. 
StorageType default_storage_type = 5; } diff --git a/gcloud/bigtable/_generated_v2/_table.proto b/gcloud/bigtable/_generated_v2/_table.proto index f5516aaf1bf5..63e41103e42f 100644 --- a/gcloud/bigtable/_generated_v2/_table.proto +++ b/gcloud/bigtable/_generated_v2/_table.proto @@ -16,6 +16,7 @@ syntax = "proto3"; package google.bigtable.admin.v2; +import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; option java_multiple_files = true; @@ -26,43 +27,14 @@ option java_package = "com.google.bigtable.admin.v2"; // A collection of user data indexed by row, column, and timestamp. // Each table is served using the resources of its parent cluster. message Table { - // The state of a table's data in a particular cluster. - message ClusterState { - enum ReplicationState { - // The replication state of the table is unknown in this cluster. - STATE_NOT_KNOWN = 0; - - // The cluster was recently created, and the table must finish copying - // over pre-existing data from other clusters before it can begin - // receiving live replication updates and serving - // [Data API][google.bigtable.v2.BigtableService] requests. - INITIALIZING = 1; - - // The table is temporarily unable to serve - // [Data API][google.bigtable.v2.BigtableService] requests from this - // cluster due to planned internal maintenance. - PLANNED_MAINTENANCE = 2; - - // The table is temporarily unable to serve - // [Data API][google.bigtable.v2.BigtableService] requests from this - // cluster due to unplanned or emergency maintenance. - UNPLANNED_MAINTENANCE = 3; - - // The table can serve - // [Data API][google.bigtable.v2.BigtableService] requests from this - // cluster. Depending on replication delay, reads may not immediately - // reflect the state of the table in other clusters. - READY = 4; - } - - // The state of replication for the table in this cluster. - // @OutputOnly - ReplicationState replication_state = 1; - } - + // Possible timestamp granularities to use when keeping multiple versions + // of data in a table. enum TimestampGranularity { + // The user did not specify a granularity. Should not be returned. + // When specified during table creation, MILLIS will be used. TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; + // The table keeps data versioned at a granularity of 1ms. MILLIS = 1; } @@ -77,10 +49,6 @@ message Table { // Only populates `name` and fields related to the table's schema. SCHEMA_VIEW = 2; - // Only populates `name` and fields related to the table's - // replication state. - REPLICATION_VIEW = 3; - // Populates all fields. FULL = 4; } @@ -91,14 +59,6 @@ message Table { // @OutputOnly string name = 1; - // Map from cluster ID to per-cluster table state. - // If it could not be determined whether or not the table has data in a - // particular cluster (for example, if its zone is unavailable), then - // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: REPLICATION_VIEW, FULL - // @OutputOnly - map cluster_states = 2; - // The column families configured for this table, mapped by column family ID. 
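The Table.View values above pair with the `view` field on `ListTablesRequest`, which this patch also switches from `name` to `parent`. A sketch of a list-tables call; the `bigtable_table_admin_pb2` and `table_pb2` module names and the `table_admin_stub` variable are assumptions inferred from the proto file names:

from gcloud.bigtable._generated_v2 import bigtable_table_admin_pb2 as table_admin
from gcloud.bigtable._generated_v2 import table_pb2

request = table_admin.ListTablesRequest(
    parent='projects/myproject/instances/myinstance',
    # NAME_ONLY is the default; per the comment above it is also the only
    # view currently supported for ListTables.
    view=table_pb2.Table.NAME_ONLY,
)
# tables = table_admin_stub.ListTables(request).tables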
// Views: SCHEMA_VIEW, FULL // @CreationOnly diff --git a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py index 4d02b6e71bb8..9da2364b7866 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py @@ -14,7 +14,6 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated_v2 import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 from gcloud.bigtable._generated_v2 import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 @@ -25,9 +24,9 @@ name='google/bigtable/admin/v2/bigtable_instance_admin.proto', package='google.bigtable.admin.v2', syntax='proto3', - serialized_pb=_b('\n6google/bigtable/admin/v2/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\x1a\'google/bigtable/admin/v2/instance.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x95\x02\n\x15\x43reateInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"R\n\x14ListInstancesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"l\n\x14\x43reateClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"7\n\x13ListClustersRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xd3\x0b\n\x15\x42igtableInstanceAdmin\x12\x8c\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\"*\x82\xd3\xe4\x93\x02$\"\x1f/v2/{name=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x99\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v2/{name=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\";\x82\xd3\xe4\x93\x02\x35\"*/v2/{name=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa1\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}B<\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01\x62\x06proto3') + serialized_pb=_b('\n6google/bigtable/admin/v2/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\'google/bigtable/admin/v2/instance.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 
\x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xdb\x0b\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}B<\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01\x62\x06proto3') , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) + 
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -66,8 +65,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=489, - serialized_end=571, + serialized_start=452, + serialized_end=534, ) _CREATEINSTANCEREQUEST = _descriptor.Descriptor( @@ -78,7 +77,7 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.CreateInstanceRequest.name', index=0, + name='parent', full_name='google.bigtable.admin.v2.CreateInstanceRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -117,8 +116,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=294, - serialized_end=571, + serialized_start=255, + serialized_end=534, ) @@ -148,8 +147,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=573, - serialized_end=607, + serialized_start=536, + serialized_end=570, ) @@ -161,22 +160,15 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.ListInstancesRequest.name', index=0, + name='parent', full_name='google.bigtable.admin.v2.ListInstancesRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesRequest.failed_locations', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=2, - number=3, type=9, cpp_type=9, label=1, + name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -193,8 +185,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=609, - serialized_end=691, + serialized_start=572, + serialized_end=630, ) @@ -213,8 +205,15 @@ is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=1, - number=2, type=9, cpp_type=9, label=1, + name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, @@ -231,8 +230,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=693, - serialized_end=796, + serialized_start=633, + serialized_end=762, ) @@ -262,8 +261,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=798, - serialized_end=835, + serialized_start=764, + serialized_end=801, ) @@ -275,7 +274,7 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.CreateClusterRequest.name', index=0, + name='parent', full_name='google.bigtable.admin.v2.CreateClusterRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -307,8 +306,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=837, - serialized_end=945, + serialized_start=803, + serialized_end=913, ) @@ -338,8 +337,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=947, - serialized_end=980, + serialized_start=915, + serialized_end=948, ) @@ -351,7 +350,7 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.ListClustersRequest.name', index=0, + name='parent', full_name='google.bigtable.admin.v2.ListClustersRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -376,8 +375,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=982, - serialized_end=1037, + serialized_start=950, + serialized_end=1007, ) @@ -421,8 +420,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1039, - serialized_end=1165, + serialized_start=1009, + serialized_end=1135, ) @@ -452,8 +451,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1167, - serialized_end=1203, + serialized_start=1137, + serialized_end=1173, ) @@ -497,8 +496,53 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1206, - serialized_end=1404, + serialized_start=1176, + serialized_end=1374, +) + + +_UPDATECLUSTERMETADATA = _descriptor.Descriptor( + name='UpdateClusterMetadata', + full_name='google.bigtable.admin.v2.UpdateClusterMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1377, + 
serialized_end=1560, ) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER @@ -511,6 +555,9 @@ _CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST _CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST @@ -522,6 +569,7 @@ DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( @@ -608,118 +656,352 @@ )) _sym_db.RegisterMessage(CreateInstanceMetadata) +UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict( + DESCRIPTOR = _UPDATECLUSTERMETADATA, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) + )) +_sym_db.RegisterMessage(UpdateClusterMetadata) + DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True _CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -import abc -import six + from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities + +class BigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', + request_serializer=CreateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', + request_serializer=GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ) + self.ListInstances = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', + request_serializer=ListInstancesRequest.SerializeToString, + response_deserializer=ListInstancesResponse.FromString, + ) + self.UpdateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', + request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ) + self.DeleteInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', + request_serializer=DeleteInstanceRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', + request_serializer=CreateClusterRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', + request_serializer=GetClusterRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ) + self.ListClusters = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', + request_serializer=ListClustersRequest.SerializeToString, + response_deserializer=ListClustersResponse.FromString, + ) + self.UpdateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', + request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', + request_serializer=DeleteClusterRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableInstanceAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + + def CreateInstance(self, request, context): + """Create an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInstance(self, request, context): + """Gets information about an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListInstances(self, request, context): + """Lists information about instances in a project. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetCluster(self, request, context): + """Gets information about a cluster. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableInstanceAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateInstance': grpc.unary_unary_rpc_method_handler( + servicer.CreateInstance, + request_deserializer=CreateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetInstance': grpc.unary_unary_rpc_method_handler( + servicer.GetInstance, + request_deserializer=GetInstanceRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ), + 'ListInstances': grpc.unary_unary_rpc_method_handler( + servicer.ListInstances, + request_deserializer=ListInstancesRequest.FromString, + response_serializer=ListInstancesResponse.SerializeToString, + ), + 'UpdateInstance': grpc.unary_unary_rpc_method_handler( + servicer.UpdateInstance, + request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ), + 'DeleteInstance': grpc.unary_unary_rpc_method_handler( + servicer.DeleteInstance, + request_deserializer=DeleteInstanceRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'CreateCluster': grpc.unary_unary_rpc_method_handler( + servicer.CreateCluster, + request_deserializer=CreateClusterRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetCluster': grpc.unary_unary_rpc_method_handler( + servicer.GetCluster, + 
request_deserializer=GetClusterRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ), + 'ListClusters': grpc.unary_unary_rpc_method_handler( + servicer.ListClusters, + request_deserializer=ListClustersRequest.FromString, + response_serializer=ListClustersResponse.SerializeToString, + ), + 'UpdateCluster': grpc.unary_unary_rpc_method_handler( + servicer.UpdateCluster, + request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'DeleteCluster': grpc.unary_unary_rpc_method_handler( + servicer.DeleteCluster, + request_deserializer=DeleteClusterRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableInstanceAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + class BetaBigtableInstanceAdminServicer(object): - """""" + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ def CreateInstance(self, request, context): + """Create an instance within a project. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def GetInstance(self, request, context): + """Gets information about an instance. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ListInstances(self, request, context): + """Lists information about instances in a project. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def GetCluster(self, request, context): + """Gets information about a cluster. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def UpdateCluster(self, request, context): + """Updates a cluster within an instance. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + class BetaBigtableInstanceAdminStub(object): - """The interface to which stubs will conform.""" - def CreateInstance(self, request, timeout): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + def CreateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Create an instance within a project. 
+ """ raise NotImplementedError() CreateInstance.future = None - def GetInstance(self, request, timeout): + def GetInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets information about an instance. + """ raise NotImplementedError() GetInstance.future = None - def ListInstances(self, request, timeout): + def ListInstances(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists information about instances in a project. + """ raise NotImplementedError() ListInstances.future = None - def UpdateInstance(self, request, timeout): + def UpdateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an instance within a project. + """ raise NotImplementedError() UpdateInstance.future = None - def DeleteInstance(self, request, timeout): + def DeleteInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Delete an instance from a project. + """ raise NotImplementedError() DeleteInstance.future = None - def CreateCluster(self, request, timeout): + def CreateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Creates a cluster within an instance. + """ raise NotImplementedError() CreateCluster.future = None - def GetCluster(self, request, timeout): + def GetCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets information about a cluster. + """ raise NotImplementedError() GetCluster.future = None - def ListClusters(self, request, timeout): + def ListClusters(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists information about clusters in an instance. + """ raise NotImplementedError() ListClusters.future = None - def UpdateCluster(self, request, timeout): + def UpdateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates a cluster within an instance. + """ raise NotImplementedError() UpdateCluster.future = None - def DeleteCluster(self, request, timeout): + def DeleteCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a cluster from an instance. 
+ """ raise NotImplementedError() DeleteCluster.future = None + def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.longrunning.operations_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.protobuf.empty_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.longrunning.operations_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.longrunning.operations_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.protobuf.empty_pb2 request_deserializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateClusterRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateInstanceRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteClusterRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteInstanceRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetClusterRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetInstanceRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 
'ListInstances'): ListInstancesRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, } response_serializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.longrunning.operations_pb2.Operation.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersResponse.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesResponse.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, } method_implementations = { ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), @@ -736,50 +1018,31 @@ def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, 
thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return beta_implementations.server(method_implementations, options=server_options) + def beta_create_BigtableInstanceAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.longrunning.operations_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.protobuf.empty_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.longrunning.operations_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.bigtable.admin.v2.instance_pb2 - import google.longrunning.operations_pb2 - import google.bigtable.admin.v2.bigtable_instance_admin_pb2 - import google.protobuf.empty_pb2 request_serializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateClusterRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateInstanceRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteClusterRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteInstanceRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetClusterRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetInstanceRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.SerializeToString, + 
('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, } response_deserializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.longrunning.operations_pb2.Operation.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.longrunning.operations_pb2.Operation.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersResponse.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesResponse.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.longrunning.operations_pb2.Operation.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, } cardinalities = { 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, diff --git a/gcloud/bigtable/_generated_v2/bigtable_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_pb2.py index ffb5f5fa3eba..606b3c826942 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_pb2.py +++ 
b/gcloud/bigtable/_generated_v2/bigtable_pb2.py @@ -804,74 +804,259 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) -import abc -import six + from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=ReadRowsRequest.SerializeToString, + response_deserializer=ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=SampleRowKeysRequest.SerializeToString, + response_deserializer=SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=MutateRowRequest.SerializeToString, + response_deserializer=MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=MutateRowsRequest.SerializeToString, + response_deserializer=MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=CheckAndMutateRowRequest.SerializeToString, + response_deserializer=CheckAndMutateRowResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=ReadModifyWriteRowResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=ReadRowsRequest.FromString, + response_serializer=ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=SampleRowKeysRequest.FromString, + response_serializer=SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=MutateRowRequest.FromString, + response_serializer=MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=MutateRowsRequest.FromString, + response_serializer=MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=CheckAndMutateRowRequest.FromString, + response_serializer=CheckAndMutateRowResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=ReadModifyWriteRowRequest.FromString, + response_serializer=ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + class BetaBigtableServicer(object): - """""" + """Service for reading from and writing to existing Bigtable tables. + """ def ReadRows(self, request, context): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. 
+ """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + class BetaBigtableStub(object): - """The interface to which stubs will conform.""" - def ReadRows(self, request, timeout): + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ raise NotImplementedError() - def SampleRowKeys(self, request, timeout): + def SampleRowKeys(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ raise NotImplementedError() - def MutateRow(self, request, timeout): + def MutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ raise NotImplementedError() MutateRow.future = None - def MutateRows(self, request, timeout): + def MutateRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ raise NotImplementedError() - def CheckAndMutateRow(self, request, timeout): + def CheckAndMutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ raise NotImplementedError() CheckAndMutateRow.future = None - def ReadModifyWriteRow(self, request, timeout): + def ReadModifyWriteRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. 
+ """ raise NotImplementedError() ReadModifyWriteRow.future = None + def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 request_deserializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsRequest.FromString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsRequest.FromString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysRequest.FromString, + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.FromString, } response_serializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.SerializeToString, } method_implementations = { ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), @@ -884,34 +1069,23 @@ def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_tim server_options = 
beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return beta_implementations.server(method_implementations, options=server_options) + def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 - import google.bigtable.v2.bigtable_pb2 request_serializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.SerializeToString, } response_deserializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsResponse.FromString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsResponse.FromString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysResponse.FromString, + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.FromString, } cardinalities = { 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, diff 
--git a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py index 8a884a8b91e3..c929b222b78b 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py @@ -22,7 +22,7 @@ name='google/bigtable/admin/v2/bigtable_table_admin.proto', package='google.bigtable.admin.v2', syntax='proto3', - serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc6\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"i\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod2\xb4\x07\n\x12\x42igtableTableAdmin\x12\x91\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"3\x82\xd3\xe4\x93\x02-\"(/v2/{name=projects/*/instances/*}/tables:\x01*\x12\x99\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"0\x82\xd3\xe4\x93\x02*\x12(/v2/{name=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3') + 
serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod2\xb8\x07\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -56,8 +56,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=357, - serialized_end=377, + serialized_start=359, + serialized_end=379, ) _CREATETABLEREQUEST = _descriptor.Descriptor( @@ -68,7 +68,7 @@ containing_type=None, fields=[ 
_descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.CreateTableRequest.name', index=0, + name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -108,7 +108,7 @@ oneofs=[ ], serialized_start=179, - serialized_end=377, + serialized_end=379, ) @@ -155,8 +155,8 @@ name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', index=0, containing_type=None, fields=[]), ], - serialized_start=379, - serialized_end=488, + serialized_start=381, + serialized_end=490, ) @@ -168,7 +168,7 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.ListTablesRequest.name', index=0, + name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -200,8 +200,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=490, - serialized_end=595, + serialized_start=492, + serialized_end=599, ) @@ -238,8 +238,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=597, - serialized_end=691, + serialized_start=601, + serialized_end=695, ) @@ -276,8 +276,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=693, - serialized_end=776, + serialized_start=697, + serialized_end=780, ) @@ -307,8 +307,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=778, - serialized_end=812, + serialized_start=782, + serialized_end=816, ) @@ -362,8 +362,8 @@ name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', index=0, containing_type=None, fields=[]), ], - serialized_start=952, - serialized_end=1117, + serialized_start=956, + serialized_end=1121, ) _MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( @@ -399,8 +399,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=815, - serialized_end=1117, + serialized_start=819, + serialized_end=1121, ) _CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST @@ -504,77 +504,243 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) -import abc -import six + from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities + +class BigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', + request_serializer=CreateTableRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.ListTables = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', + request_serializer=ListTablesRequest.SerializeToString, + response_deserializer=ListTablesResponse.FromString, + ) + self.GetTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', + request_serializer=GetTableRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.DeleteTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', + request_serializer=DeleteTableRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ModifyColumnFamilies = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', + request_serializer=ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.DropRowRange = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', + request_serializer=DropRowRangeRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ModifyColumnFamilies(self, request, context): + """Atomically performs a series of column family modifications + on the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableTableAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateTable': grpc.unary_unary_rpc_method_handler( + servicer.CreateTable, + request_deserializer=CreateTableRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'ListTables': grpc.unary_unary_rpc_method_handler( + servicer.ListTables, + request_deserializer=ListTablesRequest.FromString, + response_serializer=ListTablesResponse.SerializeToString, + ), + 'GetTable': grpc.unary_unary_rpc_method_handler( + servicer.GetTable, + request_deserializer=GetTableRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'DeleteTable': grpc.unary_unary_rpc_method_handler( + servicer.DeleteTable, + request_deserializer=DeleteTableRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler( + servicer.ModifyColumnFamilies, + request_deserializer=ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'DropRowRange': grpc.unary_unary_rpc_method_handler( + servicer.DropRowRange, + request_deserializer=DropRowRangeRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + class BetaBigtableTableAdminServicer(object): - """""" + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ModifyColumnFamilies(self, request, context): + """Atomically performs a series of column family modifications + on the specified table. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + class BetaBigtableTableAdminStub(object): - """The interface to which stubs will conform.""" - def CreateTable(self, request, timeout): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. 
+ """ + def CreateTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ raise NotImplementedError() CreateTable.future = None - def ListTables(self, request, timeout): + def ListTables(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists all tables served from a specified instance. + """ raise NotImplementedError() ListTables.future = None - def GetTable(self, request, timeout): + def GetTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets metadata information about the specified table. + """ raise NotImplementedError() GetTable.future = None - def DeleteTable(self, request, timeout): + def DeleteTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Permanently deletes a specified table and all of its data. + """ raise NotImplementedError() DeleteTable.future = None - def ModifyColumnFamilies(self, request, timeout): + def ModifyColumnFamilies(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Atomically performs a series of column family modifications + on the specified table. + """ raise NotImplementedError() ModifyColumnFamilies.future = None - def DropRowRange(self, request, timeout): + def DropRowRange(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ raise NotImplementedError() DropRowRange.future = None + def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.table_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.table_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.protobuf.empty_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.table_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.protobuf.empty_pb2 request_deserializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.CreateTableRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DeleteTableRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DropRowRangeRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.GetTableRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ModifyColumnFamiliesRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.FromString, + 
('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.FromString, } response_serializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, } method_implementations = { ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), @@ -587,34 +753,23 @@ def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, d server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return beta_implementations.server(method_implementations, options=server_options) + def beta_create_BigtableTableAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.table_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.table_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.protobuf.empty_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.bigtable.admin.v2.table_pb2 - import google.bigtable.admin.v2.bigtable_table_admin_pb2 - import google.protobuf.empty_pb2 request_serializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): 
google.bigtable.admin.v2.bigtable_table_admin_pb2.CreateTableRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DeleteTableRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DropRowRangeRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.GetTableRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ModifyColumnFamiliesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.SerializeToString, } response_deserializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.table_pb2.Table.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.table_pb2.Table.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.table_pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, } cardinalities = { 'CreateTable': cardinality.Cardinality.UNARY_UNARY, diff --git a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py index e69de29bb2d1..66491c348817 100644 --- a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py +++ b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py @@ -0,0 +1,256 @@ + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from 
grpc.framework.interfaces.face import utilities as face_utilities + + +class OperationsStub(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetOperation = channel.unary_unary( + '/google.longrunning.Operations/GetOperation', + request_serializer=GetOperationRequest.SerializeToString, + response_deserializer=Operation.FromString, + ) + self.ListOperations = channel.unary_unary( + '/google.longrunning.Operations/ListOperations', + request_serializer=ListOperationsRequest.SerializeToString, + response_deserializer=ListOperationsResponse.FromString, + ) + self.CancelOperation = channel.unary_unary( + '/google.longrunning.Operations/CancelOperation', + request_serializer=CancelOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.DeleteOperation = channel.unary_unary( + '/google.longrunning.Operations/DeleteOperation', + request_serializer=DeleteOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class OperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. 
Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_OperationsServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetOperation': grpc.unary_unary_rpc_method_handler( + servicer.GetOperation, + request_deserializer=GetOperationRequest.FromString, + response_serializer=Operation.SerializeToString, + ), + 'ListOperations': grpc.unary_unary_rpc_method_handler( + servicer.ListOperations, + request_deserializer=ListOperationsRequest.FromString, + response_serializer=ListOperationsResponse.SerializeToString, + ), + 'CancelOperation': grpc.unary_unary_rpc_method_handler( + servicer.CancelOperation, + request_deserializer=CancelOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'DeleteOperation': grpc.unary_unary_rpc_method_handler( + servicer.DeleteOperation, + request_deserializer=DeleteOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.longrunning.Operations', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaOperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. 
+ """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaOperationsStub(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + raise NotImplementedError() + GetOperation.future = None + def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + raise NotImplementedError() + ListOperations.future = None + def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + raise NotImplementedError() + CancelOperation.future = None + def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. 
+ """ + raise NotImplementedError() + DeleteOperation.future = None + + +def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString, + } + response_serializers = { + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString, + } + method_implementations = { + ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), + ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation), + ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation), + ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString, + } + response_deserializers = { + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'GetOperation'): Operation.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString, + } + cardinalities = { + 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, + 'DeleteOperation': cardinality.Cardinality.UNARY_UNARY, + 'GetOperation': cardinality.Cardinality.UNARY_UNARY, + 'ListOperations': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options) diff --git a/gcloud/bigtable/_generated_v2/table_pb2.py b/gcloud/bigtable/_generated_v2/table_pb2.py index e39091d88351..840076514cc7 100644 --- 
a/gcloud/bigtable/_generated_v2/table_pb2.py +++ b/gcloud/bigtable/_generated_v2/table_pb2.py @@ -13,6 +13,7 @@ _sym_db = _symbol_database.Default() +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 @@ -20,47 +21,13 @@ name='google/bigtable/admin/v2/table.proto', package='google.bigtable.admin.v2', syntax='proto3', - serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1egoogle/protobuf/duration.proto\"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState\"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3') + serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xa0\x03\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"F\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 
.google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3') , - dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) -_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( - name='ReplicationState', - full_name='google.bigtable.admin.v2.Table.ClusterState.ReplicationState', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INITIALIZING', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PLANNED_MAINTENANCE', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='UNPLANNED_MAINTENANCE', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=458, - serialized_end=578, -) -_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) - _TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( name='TimestampGranularity', full_name='google.bigtable.admin.v2.Table.TimestampGranularity', @@ -78,8 +45,8 @@ ], containing_type=None, options=None, - serialized_start=775, - serialized_end=848, + serialized_start=400, + serialized_end=473, ) _sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) @@ -102,90 +69,18 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='REPLICATION_VIEW', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FULL', index=4, number=4, + name='FULL', index=3, number=4, options=None, type=None), ], containing_type=None, options=None, - serialized_start=850, - serialized_end=942, + serialized_start=475, + serialized_end=545, ) _sym_db.RegisterEnumDescriptor(_TABLE_VIEW) -_TABLE_CLUSTERSTATE = _descriptor.Descriptor( - name='ClusterState', - full_name='google.bigtable.admin.v2.Table.ClusterState', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='replication_state', full_name='google.bigtable.admin.v2.Table.ClusterState.replication_state', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _TABLE_CLUSTERSTATE_REPLICATIONSTATE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=352, - serialized_end=578, -) - -_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( - name='ClusterStatesEntry', - 
full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=580, - serialized_end=678, -) - _TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( name='ColumnFamiliesEntry', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', @@ -219,8 +114,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=680, - serialized_end=773, + serialized_start=305, + serialized_end=398, ) _TABLE = _descriptor.Descriptor( @@ -238,21 +133,14 @@ is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='cluster_states', full_name='google.bigtable.admin.v2.Table.cluster_states', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=2, + name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=1, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=3, + name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=2, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -261,7 +149,7 @@ ], extensions=[ ], - nested_types=[_TABLE_CLUSTERSTATE, _TABLE_CLUSTERSTATESENTRY, _TABLE_COLUMNFAMILIESENTRY, ], + nested_types=[_TABLE_COLUMNFAMILIESENTRY, ], enum_types=[ _TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW, @@ -272,8 +160,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=99, - serialized_end=942, + serialized_start=129, + serialized_end=545, ) @@ -303,8 +191,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=944, - serialized_end=1009, + serialized_start=547, + serialized_end=612, ) @@ -334,8 +222,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1224, - serialized_end=1287, + serialized_start=827, + serialized_end=890, ) _GCRULE_UNION = _descriptor.Descriptor( @@ -364,8 +252,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1289, - serialized_end=1345, + serialized_start=892, + serialized_end=948, ) _GCRULE = _descriptor.Descriptor( @@ -418,18 +306,12 @@ name='rule', 
full_name='google.bigtable.admin.v2.GcRule.rule', index=0, containing_type=None, fields=[]), ], - serialized_start=1012, - serialized_end=1353, + serialized_start=615, + serialized_end=956, ) -_TABLE_CLUSTERSTATE.fields_by_name['replication_state'].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE -_TABLE_CLUSTERSTATE.containing_type = _TABLE -_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.fields_by_name['value'].message_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE _TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY _TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name['cluster_states'].message_type = _TABLE_CLUSTERSTATESENTRY _TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY _TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY _TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE @@ -460,20 +342,6 @@ Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( - ClusterState = _reflection.GeneratedProtocolMessageType('ClusterState', (_message.Message,), dict( - DESCRIPTOR = _TABLE_CLUSTERSTATE, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - )) - , - - ClusterStatesEntry = _reflection.GeneratedProtocolMessageType('ClusterStatesEntry', (_message.Message,), dict( - DESCRIPTOR = _TABLE_CLUSTERSTATESENTRY, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - )) - , - ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, __module__ = 'google.bigtable.admin.v2.table_pb2' @@ -485,8 +353,6 @@ # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) )) _sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ClusterState) -_sym_db.RegisterMessage(Table.ClusterStatesEntry) _sym_db.RegisterMessage(Table.ColumnFamiliesEntry) ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( @@ -522,8 +388,6 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001')) -_TABLE_CLUSTERSTATESENTRY.has_options = True -_TABLE_CLUSTERSTATESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _TABLE_COLUMNFAMILIESENTRY.has_options = True _TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) # @@protoc_insertion_point(module_scope) diff --git a/scripts/make_operations_grpc.py b/scripts/make_operations_grpc.py index 0b6a1e8ebc38..0e779964f3b0 100644 --- a/scripts/make_operations_grpc.py +++ b/scripts/make_operations_grpc.py @@ -23,15 +23,13 @@ ROOT_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) -PROTOS_DIR = os.path.join(ROOT_DIR, 'cloud-bigtable-client', - 'bigtable-protos', 'src', 'main', 'proto') +PROTOS_DIR = os.path.join(ROOT_DIR, 'googleapis-pb') PROTO_PATH = os.path.join(PROTOS_DIR, 'google', 'longrunning', 'operations.proto') GENERATED_SUBDIR = os.environ.get('GENERATED_SUBDIR', '_generated') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'bigtable', GENERATED_SUBDIR, 'operations_grpc_pb2.py') -PROTOC_CMD = os.environ.get('PROTOC_CMD', 
'protoc') -GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin') +GRPCIO_VIRTUALENV = os.environ.get('GRPCIO_VIRTUALENV', 'protoc') def get_pb2_contents_with_grpc(): @@ -45,14 +43,14 @@ def get_pb2_contents_with_grpc(): 'operations_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', temp_dir, - '--plugin', - 'protoc-gen-grpc=' + GRPC_PLUGIN, - '--grpc_out', + '--grpc_python_out', temp_dir, PROTO_PATH, ]) @@ -75,7 +73,9 @@ def get_pb2_contents_without_grpc(): 'operations_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', diff --git a/scripts/rewrite_imports.py b/scripts/rewrite_imports.py index 5717a50fd8e3..d6523d4d5410 100644 --- a/scripts/rewrite_imports.py +++ b/scripts/rewrite_imports.py @@ -112,6 +112,10 @@ def transform_line(line): :rtype: str :returns: The transformed line. """ + # Work around https://github.com/grpc/grpc/issues/7101 + if line == 'import ': + return '' + for old_module, new_module in REPLACEMENTS.iteritems(): result = transform_old_to_new(line, old_module, new_module) if result is not None: From d97d0309738adc6d947107d05381d1e81df1a1a8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 00:38:57 -0400 Subject: [PATCH 040/103] Remove duplicated line. --- Makefile.bigtable_v2 | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile.bigtable_v2 b/Makefile.bigtable_v2 index 1c9bdae13930..05681b1d55ed 100644 --- a/Makefile.bigtable_v2 +++ b/Makefile.bigtable_v2 @@ -40,7 +40,6 @@ generate: # Move the newly generated *_pb2.py files into our library. cp $(GENERATED_DIR)/google/bigtable/v2/* $(BIGTABLE_DIR) cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR) - cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR) # Remove all existing *.proto files before we replace rm -f $(BIGTABLE_DIR)/*.proto # Copy over the *.proto files into our library. From 4a223b74668bc68ea744709a08c93b574f9f3b2a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 00:40:47 -0400 Subject: [PATCH 041/103] Use 'grpcio-tools' in a virtualenv to generate from latest Bigtable V1 protos. This is a better hack for #1482, but we still really want #1384. 
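For reference, the new workflow amounts to driving protoc through grpcio-tools inside a throwaway
virtualenv instead of through the external grpc_python_plugin. A rough sketch of the equivalent
invocation follows; the paths and file names are illustrative, not lifted verbatim from the Makefile:

    # Assumed layout: virtualenv at ./grpc_python_venv, protos under ./googleapis-pb,
    # generated output under ./generated_python -- adjust to the local checkout.
    import subprocess

    venv = 'grpc_python_venv'
    # Make sure grpcio / grpcio-tools are current inside the virtualenv.
    subprocess.check_call([
        '%s/bin/pip' % venv, 'install', '--upgrade', 'grpcio', 'grpcio-tools'])
    # Run protoc via the grpcio-tools module, emitting both plain and gRPC stubs.
    subprocess.check_call([
        '%s/bin/python' % venv, '-m', 'grpc.tools.protoc',
        '--proto_path', 'googleapis-pb',
        '--python_out', 'generated_python',
        '--grpc_python_out', 'generated_python',
        'googleapis-pb/google/longrunning/operations.proto',
    ])
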
--- Makefile.bigtable_v1 | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/Makefile.bigtable_v1 b/Makefile.bigtable_v1 index a2db4197cbbb..d6e6a247a06f 100644 --- a/Makefile.bigtable_v1 +++ b/Makefile.bigtable_v1 @@ -1,11 +1,11 @@ +GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv GENERATED_DIR=$(shell pwd)/generated_python GENERATED_SUBDIR=_generated BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) -GRPC_PLUGIN=grpc_python_plugin -PROTOC_CMD=protoc -BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client -BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-protos/src/main/proto +PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb +BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client +BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-client-core-parent/bigtable-protos/src/main/proto help: @echo 'Makefile for gcloud-python Bigtable protos ' @@ -15,19 +15,22 @@ help: @echo ' make clean Clean generated files ' generate: + # Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools + [ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV) + $(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools # Retrieve git repos that have our *.proto files. [ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master - [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 - cd googleapis-pb && git pull origin master + [ -d $(GOOGLEAPIS_PROTOS_DIR) ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 + cd $(GOOGLEAPIS_PROTOS_DIR) && git pull origin master # Make the directory where our *_pb2.py files will go. mkdir -p $(GENERATED_DIR) # Generate all *_pb2.py files that require gRPC. $(PROTOC_CMD) \ --proto_path=$(BIGTABLE_PROTOS_DIR) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ --python_out=$(GENERATED_DIR) \ - --plugin=protoc-gen-grpc=$(GRPC_PLUGIN) \ - --grpc_out=$(GENERATED_DIR) \ + --grpc_python_out=$(GENERATED_DIR) \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/bigtable_service.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_service.proto @@ -52,7 +55,7 @@ generate: cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/*.proto $(BIGTABLE_DIR) - cp $(BIGTABLE_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) # Rename all *.proto files in our library with an # underscore and remove executable bit. cd $(BIGTABLE_DIR) && \ @@ -63,8 +66,8 @@ generate: # Separate the gRPC parts of the operations service from the # non-gRPC parts so that the protos from `googleapis-common-protos` # can be used without gRPC. - PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ - GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ + GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ python scripts/make_operations_grpc.py # Rewrite the imports in the generated *_pb2.py files. 
python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py @@ -73,6 +76,6 @@ check_generate: python scripts/check_generate.py clean: - rm -fr $(BIGTABLE_CHECKOUT_DIR) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) + rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) .PHONY: generate check_generate clean From 568a463060ec34ba7fa6372a6f465d055cbc4b12 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 00:47:56 -0400 Subject: [PATCH 042/103] Use 'grpcio-tools' in a virtualenv to generate from latest datastore V1beta3 protos. This is a better hack for #1482, but we still really want #1384. --- Makefile.datastore | 10 +++++++--- scripts/make_datastore_grpc.py | 15 ++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/Makefile.datastore b/Makefile.datastore index 02f430c53c50..73665ef5f542 100644 --- a/Makefile.datastore +++ b/Makefile.datastore @@ -1,7 +1,7 @@ +GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv GENERATED_DIR=$(shell pwd)/generated_python DATASTORE_DIR=$(shell pwd)/gcloud/datastore/_generated -GRPC_PLUGIN=grpc_python_plugin -PROTOC_CMD=protoc +PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb help: @@ -12,6 +12,9 @@ help: @echo ' make clean Clean generated files ' generate: + # Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools + [ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV) + $(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools # Retrieve git repos that have our *.proto files. [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 cd googleapis-pb && git pull origin master @@ -39,7 +42,8 @@ generate: done # Separate the gRPC parts of the datastore service from the # non-gRPC parts so that the protos can be used without gRPC. - PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \ + GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ python scripts/make_datastore_grpc.py # Rewrite the imports in the generated *_pb2.py files. python scripts/rewrite_imports.py $(DATASTORE_DIR)/*pb2.py diff --git a/scripts/make_datastore_grpc.py b/scripts/make_datastore_grpc.py index 5c460511a889..d1a50b009200 100644 --- a/scripts/make_datastore_grpc.py +++ b/scripts/make_datastore_grpc.py @@ -28,8 +28,7 @@ 'v1beta3', 'datastore.proto') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'datastore', '_generated', 'datastore_grpc_pb2.py') -PROTOC_CMD = os.environ.get('PROTOC_CMD', 'protoc') -GRPC_PLUGIN = os.environ.get('GRPC_PLUGIN', 'grpc_python_plugin') +GRPCIO_VIRTUALENV = os.environ.get('GRPCIO_VIRTUALENV', 'protoc') def get_pb2_contents_with_grpc(): @@ -43,14 +42,14 @@ def get_pb2_contents_with_grpc(): 'v1beta3', 'datastore_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', temp_dir, - '--plugin', - 'protoc-gen-grpc=' + GRPC_PLUGIN, - '--grpc_out', + '--grpc_python_out', temp_dir, PROTO_PATH, ]) @@ -73,7 +72,9 @@ def get_pb2_contents_without_grpc(): 'v1beta3', 'datastore_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', From 24989d60f06bf20f8868cf9100509211c7f14c2f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 01:00:55 -0400 Subject: [PATCH 043/103] Accomodate final V2 renames: 'name' -> 'parent'. 
Closes #1918. --- gcloud/bigtable/cluster.py | 2 +- gcloud/bigtable/instance.py | 6 +++--- gcloud/bigtable/table.py | 2 +- gcloud/bigtable/test_cluster.py | 2 +- gcloud/bigtable/test_instance.py | 8 ++++---- gcloud/bigtable/test_table.py | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index a65a5daa945a..bb5cfb349498 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -50,7 +50,7 @@ def _prepare_create_request(cluster): :returns: The CreateCluster request object containing the cluster info. """ return messages_v2_pb2.CreateClusterRequest( - name=cluster._instance.name, + parent=cluster._instance.name, cluster_id=cluster.cluster_id, cluster=data_v2_pb2.Cluster( serve_nodes=cluster.serve_nodes, diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py index ba29e43a57c2..c33d8a64c9e1 100644 --- a/gcloud/bigtable/instance.py +++ b/gcloud/bigtable/instance.py @@ -54,7 +54,7 @@ def _prepare_create_request(instance): """ parent_name = ('projects/' + instance._client.project) return messages_v2_pb2.CreateInstanceRequest( - name=parent_name, + parent=parent_name, instance_id=instance.instance_id, instance=data_v2_pb2.Instance( display_name=instance.display_name, @@ -412,7 +412,7 @@ def list_clusters(self): returned and the second is a list of strings (the failed locations in the request). """ - request_pb = messages_v2_pb2.ListClustersRequest(name=self.name) + request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name) # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` list_clusters_response = self._client._instance_stub.ListClusters( request_pb, self._client.timeout_seconds) @@ -442,7 +442,7 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. 
""" - request_pb = table_messages_v2_pb2.ListTablesRequest(name=self.name) + request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name) # We expect a `table_messages_v2_pb2.ListTablesResponse` table_list_pb = self._client._table_stub.ListTables( request_pb, self._client.timeout_seconds) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 159fc4566c42..6c2cfd82fac9 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -170,7 +170,7 @@ def create(self, initial_split_keys=None): split_pb(key=key) for key in initial_split_keys] request_pb = table_admin_messages_v2_pb2.CreateTableRequest( initial_splits=initial_split_keys or [], - name=self._instance.name, + parent=self._instance.name, table_id=self.table_id, ) client = self._instance._client diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index 6ddca98bf92e..a1a43b5533cc 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -497,7 +497,7 @@ def test_it(self): request_pb = self._callFUT(cluster) self.assertEqual(request_pb.cluster_id, CLUSTER_ID) - self.assertEqual(request_pb.name, instance.name) + self.assertEqual(request_pb.parent, instance.name) self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py index fbe2f384cfb2..38e9ef959194 100644 --- a/gcloud/bigtable/test_instance.py +++ b/gcloud/bigtable/test_instance.py @@ -471,7 +471,7 @@ def test_list_clusters(self): CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) # Create request_pb request_pb = messages_v2_pb2.ListClustersRequest( - name=instance.name, + parent=instance.name, ) # Create response_pb @@ -520,7 +520,7 @@ def _list_tables_helper(self, table_name=None): # Create request_ request_pb = table_messages_v1_pb2.ListTablesRequest( - name=self.INSTANCE_NAME) + parent=self.INSTANCE_NAME) # Create response_pb if table_name is None: @@ -588,7 +588,7 @@ def test_it(self): self.assertTrue(isinstance(request_pb, messages_v2_pb.CreateInstanceRequest)) self.assertEqual(request_pb.instance_id, INSTANCE_ID) - self.assertEqual(request_pb.name, + self.assertEqual(request_pb.parent, 'projects/' + PROJECT) self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) @@ -637,7 +637,7 @@ def test_with_create_instance_metadata(self): request_time=Timestamp(seconds=1, nanos=1234), finish_time=Timestamp(seconds=10, nanos=891011), original_request=messages_v2_pb.CreateInstanceRequest( - name='foo', + parent='foo', instance_id='bar', instance=data_v2_pb2.Instance( display_name='quux', diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index cd47ada3ba22..df385214d77a 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -147,7 +147,7 @@ def _create_test_helper(self, initial_split_keys): for key in initial_split_keys or ()] request_pb = _CreateTableRequestPB( initial_splits=splits_pb, - name=self.INSTANCE_NAME, + parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, ) From b1d56a5b7fb93f3170c4f4fbf147544fa4611fff Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Tue, 28 Jun 2016 10:04:45 -0400 Subject: [PATCH 044/103] Add cleanup and refactor. 
--- gcloud/_helpers.py | 5 ++--- gcloud/test__helpers.py | 12 ++++++------ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/gcloud/_helpers.py b/gcloud/_helpers.py index d869e2e3ecc7..834197a42f55 100644 --- a/gcloud/_helpers.py +++ b/gcloud/_helpers.py @@ -191,10 +191,9 @@ def _default_service_project_id(): search_paths = [] # Workaround for GAE not supporting pwd which is used by expanduser. try: - full_config_path = os.path.expanduser(DEFAULT_CONFIGURATION_PATH) - search_paths.append(full_config_path) + search_paths.append(os.path.expanduser(DEFAULT_CONFIGURATION_PATH)) except ImportError: - full_config_path = '' + pass win32_config_path = os.path.join(os.getenv('APPDATA', ''), 'gcloud', 'configurations', 'config_default') diff --git a/gcloud/test__helpers.py b/gcloud/test__helpers.py index a7646af2e963..3f843ef6f4d5 100644 --- a/gcloud/test__helpers.py +++ b/gcloud/test__helpers.py @@ -224,12 +224,12 @@ def test_read_from_cli_info(self): def test_gae_without_expanduser(self): import sys - import pwd - del pwd - sys.modules['pwd'] = None # Blocks pwd from being imported. - project_id = self.callFUT('test-project-id') - self.assertEqual(None, project_id) - del sys.modules['pwd'] # Unblocks importing of pwd. + try: + sys.modules['pwd'] = None # Blocks pwd from being imported. + project_id = self.callFUT('test-project-id') + self.assertEqual(None, project_id) + finally: + del sys.modules['pwd'] # Unblocks importing of pwd. def test_info_value_not_present(self): project_id = self.callFUT() From a488a8ae2c0355ae7a7cec88dc3c589c289c3691 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 11:54:50 -0400 Subject: [PATCH 045/103] More 'cluster' -> 'instance' renames. Clusters are still a (minor) feature of V2, but they are mostly of little interest. The 'instance' object takes almost all the roles for ordinary usage. --- gcloud/bigtable/client.py | 30 +++++++++++++-------------- gcloud/bigtable/cluster.py | 8 ++++---- gcloud/bigtable/test_client.py | 36 ++++++++++++++++----------------- gcloud/bigtable/test_cluster.py | 8 ++++---- 4 files changed, 41 insertions(+), 41 deletions(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 2ec8f6e89bd2..18c27e54272d 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -98,14 +98,14 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): :type project: :class:`str` or :func:`unicode ` :param project: (Optional) The ID of the project which owns the - clusters, tables and data. If not provided, will + instances, tables and data. If not provided, will attempt to determine from the environment. :type credentials: :class:`OAuth2Credentials ` or :data:`NoneType ` :param credentials: (Optional) The OAuth2 Credentials to use for this - cluster. If not provided, defaults to the Google + client. If not provided, defaults to the Google Application Default Credentials. :type read_only: bool @@ -162,7 +162,7 @@ def __init__(self, project=None, credentials=None, # These will be set in start(). self._data_stub_internal = None - self._cluster_stub_internal = None + self._instance_stub_internal = None self._operations_stub_internal = None self._table_stub_internal = None @@ -229,7 +229,7 @@ def _data_stub(self): return self._data_stub_internal @property - def _cluster_stub(self): + def _instance_stub(self): """Getter for the gRPC stub used for the Instance Admin API. 
:rtype: :class:`grpc.beta._stub._AutoIntermediary` @@ -240,9 +240,9 @@ def _cluster_stub(self): """ if not self._admin: raise ValueError('Client is not an admin client.') - if self._cluster_stub_internal is None: + if self._instance_stub_internal is None: raise ValueError('Client has not been started.') - return self._cluster_stub_internal + return self._instance_stub_internal @property def _operations_stub(self): @@ -285,7 +285,7 @@ def _make_data_stub(self): return _make_stub(self, DATA_STUB_FACTORY_V2, DATA_API_HOST_V2, DATA_API_PORT_V2) - def _make_cluster_stub(self): + def _make_instance_stub(self): """Creates gRPC stub to make requests to the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` @@ -340,11 +340,11 @@ def start(self): self._data_stub_internal = self._make_data_stub() self._data_stub_internal.__enter__() if self._admin: - self._cluster_stub_internal = self._make_cluster_stub() + self._instance_stub_internal = self._make_instance_stub() self._operations_stub_internal = self._make_operations_stub() self._table_stub_internal = self._make_table_stub() - self._cluster_stub_internal.__enter__() + self._instance_stub_internal.__enter__() self._operations_stub_internal.__enter__() self._table_stub_internal.__enter__() @@ -362,12 +362,12 @@ def stop(self): # traceback to __exit__. self._data_stub_internal.__exit__(None, None, None) if self._admin: - self._cluster_stub_internal.__exit__(None, None, None) + self._instance_stub_internal.__exit__(None, None, None) self._operations_stub_internal.__exit__(None, None, None) self._table_stub_internal.__exit__(None, None, None) self._data_stub_internal = None - self._cluster_stub_internal = None + self._instance_stub_internal = None self._operations_stub_internal = None self._table_stub_internal = None @@ -397,8 +397,8 @@ class _MetadataPlugin(object): """Callable class to transform metadata for gRPC requests. :type client: :class:`.client.Client` - :param client: The client that owns the cluster. Provides authorization and - user agent. + :param client: The client that owns the instance. + Provides authorization and user agent. """ def __init__(self, client): @@ -421,8 +421,8 @@ def _make_stub(client, stub_factory, host, port): Uses / depends on the beta implementation of gRPC. :type client: :class:`.client.Client` - :param client: The client that owns the cluster. Provides authorization and - user agent. + :param client: The client that owns the instance. + Provides authorization and user agent. :type stub_factory: callable :param stub_factory: A factory which will create a gRPC stub for diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index bb5cfb349498..7fd7ee3b9355 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -289,7 +289,7 @@ def reload(self): """Reload the metadata for this cluster.""" request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._instance._client._cluster_stub.GetCluster( + cluster_pb = self._instance._client._instance_stub.GetCluster( request_pb, self._instance._client.timeout_seconds) # NOTE: _update_from_pb does not check that the project, instance and @@ -318,7 +318,7 @@ def create(self): """ request_pb = _prepare_create_request(self) # We expect a `google.longrunning.operations_pb2.Operation`. 
- operation_pb = self._instance._client._cluster_stub.CreateCluster( + operation_pb = self._instance._client._instance_stub.CreateCluster( request_pb, self._instance._client.timeout_seconds) op_id = _process_operation(operation_pb) @@ -347,7 +347,7 @@ def update(self): serve_nodes=self.serve_nodes, ) # Ignore expected `._generated.bigtable_cluster_data_pb2.Cluster`. - operation_pb = self._instance._client._cluster_stub.UpdateCluster( + operation_pb = self._instance._client._instance_stub.UpdateCluster( request_pb, self._instance._client.timeout_seconds) op_id = _process_operation(operation_pb) @@ -380,5 +380,5 @@ def delete(self): """ request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) # We expect a `google.protobuf.empty_pb2.Empty` - self._instance._client._cluster_stub.DeleteCluster( + self._instance._client._instance_stub.DeleteCluster( request_pb, self._instance._client.timeout_seconds) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index eeff14a5aab4..fe34180ba5af 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -49,7 +49,7 @@ def _constructor_test_helper(self, expected_scopes, creds, self.assertEqual(client.user_agent, user_agent) # Check stubs are set (but null) self.assertEqual(client._data_stub_internal, None) - self.assertEqual(client._cluster_stub_internal, None) + self.assertEqual(client._instance_stub_internal, None) self.assertEqual(client._operations_stub_internal, None) self.assertEqual(client._table_stub_internal, None) @@ -161,7 +161,7 @@ def _copy_test_helper(self, read_only=False, admin=False): # Put some fake stubs in place so that we can verify they # don't get copied. client._data_stub_internal = object() - client._cluster_stub_internal = object() + client._instance_stub_internal = object() client._operations_stub_internal = object() client._table_stub_internal = object() @@ -173,7 +173,7 @@ def _copy_test_helper(self, read_only=False, admin=False): self.assertEqual(new_client.timeout_seconds, client.timeout_seconds) # Make sure stubs are not preserved. 
self.assertEqual(new_client._data_stub_internal, None) - self.assertEqual(new_client._cluster_stub_internal, None) + self.assertEqual(new_client._instance_stub_internal, None) self.assertEqual(new_client._operations_stub_internal, None) self.assertEqual(new_client._table_stub_internal, None) @@ -213,29 +213,29 @@ def test_data_stub_failure(self): with self.assertRaises(ValueError): getattr(client, '_data_stub') - def test_cluster_stub_getter(self): + def test_instance_stub_getter(self): credentials = _Credentials() project = 'PROJECT' client = self._makeOne(project=project, credentials=credentials, admin=True) - client._cluster_stub_internal = object() - self.assertTrue(client._cluster_stub is client._cluster_stub_internal) + client._instance_stub_internal = object() + self.assertTrue(client._instance_stub is client._instance_stub_internal) - def test_cluster_stub_non_admin_failure(self): + def test_instance_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._makeOne(project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): - getattr(client, '_cluster_stub') + getattr(client, '_instance_stub') - def test_cluster_stub_unset_failure(self): + def test_instance_stub_unset_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._makeOne(project=project, credentials=credentials, admin=True) with self.assertRaises(ValueError): - getattr(client, '_cluster_stub') + getattr(client, '_instance_stub') def test_operations_stub_getter(self): credentials = _Credentials() @@ -317,7 +317,7 @@ def mock_make_stub(*args): ), ]) - def test__make_cluster_stub(self): + def test__make_instance_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT from gcloud.bigtable.client import INSTANCE_ADMIN_HOST_V2 @@ -336,7 +336,7 @@ def mock_make_stub(*args): return fake_stub with _Monkey(MUT, _make_stub=mock_make_stub): - result = client._make_cluster_stub() + result = client._make_instance_stub() self.assertTrue(result is fake_stub) self.assertEqual(make_stub_args, [ @@ -443,13 +443,13 @@ def mock_make_stub(*args): self.assertTrue(client._data_stub_internal is stub) if admin: - self.assertTrue(client._cluster_stub_internal is stub) + self.assertTrue(client._instance_stub_internal is stub) self.assertTrue(client._operations_stub_internal is stub) self.assertTrue(client._table_stub_internal is stub) self.assertEqual(stub._entered, 4) self.assertEqual(len(make_stub_args), 4) else: - self.assertTrue(client._cluster_stub_internal is None) + self.assertTrue(client._instance_stub_internal is None) self.assertTrue(client._operations_stub_internal is None) self.assertTrue(client._table_stub_internal is None) self.assertEqual(stub._entered, 1) @@ -484,12 +484,12 @@ def _stop_method_helper(self, admin): stub1 = _FakeStub() stub2 = _FakeStub() client._data_stub_internal = stub1 - client._cluster_stub_internal = stub2 + client._instance_stub_internal = stub2 client._operations_stub_internal = stub2 client._table_stub_internal = stub2 client.stop() self.assertTrue(client._data_stub_internal is None) - self.assertTrue(client._cluster_stub_internal is None) + self.assertTrue(client._instance_stub_internal is None) self.assertTrue(client._operations_stub_internal is None) self.assertTrue(client._table_stub_internal is None) self.assertEqual(stub1._entered, 0) @@ -516,10 +516,10 @@ def test_stop_while_stopped(self): # This is a bit hacky. 
We set the cluster stub protected value # since it isn't used in is_started() and make sure that stop # doesn't reset this value to None. - client._cluster_stub_internal = cluster_stub = object() + client._instance_stub_internal = instance_stub = object() client.stop() # Make sure the cluster stub did not change. - self.assertEqual(client._cluster_stub_internal, cluster_stub) + self.assertEqual(client._instance_stub_internal, instance_stub) def test_instance_factory(self): from gcloud.bigtable.instance import Instance diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index a1a43b5533cc..4f8da614f439 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -321,7 +321,7 @@ def test_reload(self): ) # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. expected_result = None # reload() has no return value. @@ -364,7 +364,7 @@ def test_create(self): response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. expected_result = MUT.Operation('create', OP_ID, cluster=cluster) @@ -419,7 +419,7 @@ def test_update(self): response_pb = operations_pb2.Operation() # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. OP_ID = 5678 @@ -459,7 +459,7 @@ def test_delete(self): response_pb = empty_pb2.Empty() # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. expected_result = None # delete() has no return value. From 7214045519faea63bb4a2144d934b8c998028492 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 11:55:01 -0400 Subject: [PATCH 046/103] Use (same) actual V2 admin endpoint host for instance / table admin. --- gcloud/bigtable/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 18c27e54272d..b9b72559fb16 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -51,14 +51,14 @@ TABLE_STUB_FACTORY_V2 = ( table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub) -TABLE_ADMIN_HOST_V2 = 'bigtabletableadmin.googleapis.com' +TABLE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com' """Table Admin API request host.""" TABLE_ADMIN_PORT_V2 = 443 """Table Admin API request port.""" INSTANCE_STUB_FACTORY_V2 = ( instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub) -INSTANCE_ADMIN_HOST_V2 = 'bigtableclusteradmin.googleapis.com' +INSTANCE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com' """Cluster Admin API request host.""" INSTANCE_ADMIN_PORT_V2 = 443 """Cluster Admin API request port.""" From c6b747e01221be5a7682888e3d97e9604a47bfe6 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 12:20:03 -0400 Subject: [PATCH 047/103] Use class-level constants. 
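Literals shared by several test methods (project ID, timeout, user agent) are hoisted onto the
TestCase class so each test reads them via 'self' instead of re-declaring locals. A minimal,
self-contained sketch of the pattern (hypothetical 'TestExample' class, not an excerpt of the
diff below):

    import unittest2

    class TestExample(unittest2.TestCase):

        # Values shared by many test methods live on the class ...
        PROJECT = 'PROJECT'
        TIMEOUT_SECONDS = 80
        USER_AGENT = 'you-sir-age-int'

        def test_uses_shared_constants(self):
            # ... and each test refers to them via 'self'.
            config = {'project': self.PROJECT,
                      'timeout_seconds': self.TIMEOUT_SECONDS,
                      'user_agent': self.USER_AGENT}
            self.assertEqual(config['project'], self.PROJECT)

    if __name__ == '__main__':
        unittest2.main()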
--- gcloud/bigtable/test_client.py | 46 ++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index fe34180ba5af..4bfed0280ff2 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -18,6 +18,12 @@ class TestClient(unittest2.TestCase): + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + TIMEOUT_SECONDS = 80 + USER_AGENT = 'you-sir-age-int' + def _getTargetClass(self): from gcloud.bigtable.client import Client return Client @@ -33,8 +39,7 @@ def _constructor_test_helper(self, expected_scopes, creds, user_agent = user_agent or MUT.DEFAULT_USER_AGENT timeout_seconds = timeout_seconds or MUT.DEFAULT_TIMEOUT_SECONDS - PROJECT = 'PROJECT' - client = self._makeOne(project=PROJECT, credentials=creds, + client = self._makeOne(project=self.PROJECT, credentials=creds, read_only=read_only, admin=admin, user_agent=user_agent, timeout_seconds=timeout_seconds) @@ -44,7 +49,7 @@ def _constructor_test_helper(self, expected_scopes, creds, if expected_scopes is not None: self.assertEqual(client._credentials.scopes, expected_scopes) - self.assertEqual(client.project, PROJECT) + self.assertEqual(client.project, self.PROJECT) self.assertEqual(client.timeout_seconds, timeout_seconds) self.assertEqual(client.user_agent, user_agent) # Check stubs are set (but null) @@ -63,13 +68,13 @@ def test_constructor_default_scopes(self): def test_constructor_custom_user_agent_and_timeout(self): from gcloud.bigtable import client as MUT - timeout_seconds = 1337 - user_agent = 'custom-application' + CUSTOM_TIMEOUT_SECONDS = 1337 + CUSTOM_USER_AGENT = 'custom-application' expected_scopes = [MUT.DATA_SCOPE] creds = _Credentials() self._constructor_test_helper(expected_scopes, creds, - user_agent=user_agent, - timeout_seconds=timeout_seconds) + user_agent=CUSTOM_USER_AGENT, + timeout_seconds=CUSTOM_TIMEOUT_SECONDS) def test_constructor_with_admin(self): from gcloud.bigtable import client as MUT @@ -112,8 +117,7 @@ def test_constructor_credentials_wo_create_scoped(self): def _context_manager_helper(self): credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) + client = self._makeOne(project=self.PROJECT, credentials=credentials) def mock_start(): client._data_stub_internal = object() @@ -151,13 +155,13 @@ class DummyException(Exception): def _copy_test_helper(self, read_only=False, admin=False): credentials = _Credentials('value') - project = 'PROJECT' - timeout_seconds = 123 - user_agent = 'you-sir-age-int' - client = self._makeOne(project=project, credentials=credentials, - read_only=read_only, admin=admin, - timeout_seconds=timeout_seconds, - user_agent=user_agent) + client = self._makeOne( + project=self.PROJECT, + credentials=credentials, + read_only=read_only, + admin=admin, + timeout_seconds=self.TIMEOUT_SECONDS, + user_agent=self.USER_AGENT) # Put some fake stubs in place so that we can verify they # don't get copied. 
client._data_stub_internal = object() @@ -550,15 +554,15 @@ def _makeOne(self, *args, **kwargs): def test_constructor(self): from gcloud.bigtable.client import Client from gcloud.bigtable.client import DATA_SCOPE + PROJECT = 'PROJECT' + USER_AGENT = 'USER_AGENT' credentials = _Credentials() - project = 'PROJECT' - user_agent = 'USER_AGENT' - client = Client(project=project, credentials=credentials, - user_agent=user_agent) + client = Client(project=PROJECT, credentials=credentials, + user_agent=USER_AGENT) transformer = self._makeOne(client) self.assertTrue(transformer._credentials is credentials) - self.assertEqual(transformer._user_agent, user_agent) + self.assertEqual(transformer._user_agent, USER_AGENT) self.assertEqual(credentials.scopes, [DATA_SCOPE]) def test___call__(self): From 40f609ebc87e9dcebcaaeedd40e3f2d32ac9c41d Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 12:20:27 -0400 Subject: [PATCH 048/103] Add 'Client.list_instances' method. --- gcloud/bigtable/client.py | 18 ++++++++++ gcloud/bigtable/test_client.py | 65 ++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index b9b72559fb16..005fe78590a3 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -392,6 +392,24 @@ def instance(self, instance_id, display_name=None): """ return Instance(instance_id, self, display_name=display_name) + def list_instances(self): + """List instances owned by the project. + + :rtype: tuple + :returns: A pair of results, the first is a list of + :class:`.Instance` objects returned and the second is a + list of strings (the failed locations in the request). + """ + request_pb = instance_admin_v2_pb2.ListInstancesRequest( + parent=self.project_name) + + response = self._instance_stub.ListInstances( + request_pb, self.timeout_seconds) + + instances = [Instance.from_pb(instance_pb, self) + for instance_pb in response.instances] + return instances, response.failed_locations + class _MetadataPlugin(object): """Callable class to transform metadata for gRPC requests. 
diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index 4bfed0280ff2..a318d83fb69d 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -541,6 +541,71 @@ def test_instance_factory(self): self.assertEqual(instance.display_name, DISPLAY_NAME) self.assertTrue(instance._client is client) + def test_list_instances(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + FAILED_LOCATION = 'FAILED' + INSTANCE_ID1 = 'instance-id1' + INSTANCE_ID2 = 'instance-id2' + INSTANCE_NAME1 = ('projects/' + self.PROJECT + + '/instances/' + INSTANCE_ID1) + INSTANCE_NAME2 = ('projects/' + self.PROJECT + + '/instances/' + INSTANCE_ID2) + + credentials = _Credentials() + client = self._makeOne( + project=self.PROJECT, + credentials=credentials, + admin=True, + timeout_seconds=self.TIMEOUT_SECONDS, + ) + + # Create request_pb + request_pb = messages_v2_pb2.ListInstancesRequest( + parent='projects/' + self.PROJECT, + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListInstancesResponse( + failed_locations=[ + FAILED_LOCATION, + ], + instances=[ + data_v2_pb2.Instance( + name=INSTANCE_NAME1, + display_name=INSTANCE_NAME1, + ), + data_v2_pb2.Instance( + name=INSTANCE_NAME2, + display_name=INSTANCE_NAME2, + ), + ], + ) + + # Patch the stub used by the API method. + client._instance_stub_internal = stub = _FakeStub(response_pb) + + # Create expected_result. + failed_locations = [FAILED_LOCATION] + instances = [ + client.instance(INSTANCE_ID1), + client.instance(INSTANCE_ID2), + ] + expected_result = (instances, failed_locations) + + # Perform the method and check the result. + result = client.list_instances() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListInstances', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + class Test_MetadataPlugin(unittest2.TestCase): From c87ca51a06979b66a34cacafe767a3f862a58938 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 12:32:58 -0400 Subject: [PATCH 049/103] Clean out remaining refs to V1-generated code. 
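The stragglers are imports and docstring cross-references that still named modules under
'gcloud.bigtable._generated' (the V1 generated code); they now use their '_generated_v2'
counterparts. A representative before/after, mirroring the test_row_data.py hunk below:

    # Before: V1 generated protobuf module.
    from gcloud.bigtable._generated import (
        bigtable_data_pb2 as data_v1_pb2)

    # After: V2 generated protobuf module.
    from gcloud.bigtable._generated_v2 import (
        data_pb2 as data_v2_pb2)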
--- gcloud/bigtable/client.py | 6 +++--- gcloud/bigtable/cluster.py | 6 +++--- gcloud/bigtable/row.py | 2 +- gcloud/bigtable/row_data.py | 2 +- gcloud/bigtable/table.py | 8 ++++---- gcloud/bigtable/test_client.py | 11 ++++++----- gcloud/bigtable/test_row_data.py | 8 ++++---- 7 files changed, 22 insertions(+), 21 deletions(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 005fe78590a3..1405928df98a 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -40,8 +40,8 @@ from gcloud.bigtable._generated_v2 import ( bigtable_pb2 as data_v2_pb2) -from gcloud.bigtable._generated import ( - operations_grpc_pb2 as operations_grpc_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + operations_grpc_pb2 as operations_grpc_v2_pb2) from gcloud.bigtable.instance import Instance from gcloud.client import _ClientFactoryMixin @@ -69,7 +69,7 @@ DATA_API_PORT_V2 = 443 """Data API request port.""" -OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v1_pb2.beta_create_Operations_stub +OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v2_pb2.beta_create_Operations_stub OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2 OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2 diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 7fd7ee3b9355..7867cff82bc7 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -210,7 +210,7 @@ def _update_from_pb(self, cluster_pb): def from_pb(cls, cluster_pb, instance): """Creates a cluster instance from a protobuf. - :type cluster_pb: :class:`bigtable_cluster_data_pb2.Cluster` + :type cluster_pb: :class:`instance_pb2.Cluster` :param cluster_pb: A cluster protobuf object. :type instance: :class:`.instance.Instance>` @@ -288,7 +288,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this cluster.""" request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. + # We expect a `._generated_v2.instance_pb2.Cluster`. cluster_pb = self._instance._client._instance_stub.GetCluster( request_pb, self._instance._client.timeout_seconds) @@ -346,7 +346,7 @@ def update(self): name=self.name, serve_nodes=self.serve_nodes, ) - # Ignore expected `._generated.bigtable_cluster_data_pb2.Cluster`. + # Ignore expected `._generated_v2.instance_pb2.Cluster`. operation_pb = self._instance._client._instance_stub.UpdateCluster( request_pb, self._instance._client.timeout_seconds) diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py index aae048b0c7d6..07cf9e1abec4 100644 --- a/gcloud/bigtable/row.py +++ b/gcloud/bigtable/row.py @@ -854,7 +854,7 @@ def _parse_rmw_row_response(row_response): def _parse_family_pb(family_pb): """Parses a Family protobuf into a dictionary. - :type family_pb: :class:`._generated.bigtable_data_pb2.Family` + :type family_pb: :class:`._generated_v2.data_pb2.Family` :param family_pb: A protobuf :rtype: tuple diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index e353b8735ba9..3f4490097e68 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -44,7 +44,7 @@ def __init__(self, value, timestamp, labels=()): def from_pb(cls, cell_pb): """Create a new cell from a Cell protobuf. - :type cell_pb: :class:`._generated.bigtable_data_pb2.Cell` + :type cell_pb: :class:`._generated_v2.data_pb2.Cell` :param cell_pb: The protobuf to convert. 
:rtype: :class:`Cell` diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 6c2cfd82fac9..c0a3b6e335c0 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -141,7 +141,7 @@ def create(self, initial_split_keys=None): .. note:: - Though a :class:`._generated.bigtable_table_data_pb2.Table` is also + Though a :class:`._generated_v2.table_pb2.Table` is also allowed (as the ``table`` property) in a create table request, we do not support it in this method. As mentioned in the :class:`Table` docstring, the name is the only useful property in @@ -150,7 +150,7 @@ def create(self, initial_split_keys=None): .. note:: A create request returns a - :class:`._generated.bigtable_table_data_pb2.Table` but we don't use + :class:`._generated_v2.table_pb2.Table` but we don't use this response. The proto definition allows for the inclusion of a ``current_operation`` in the response, but it does not appear that the Cloud Bigtable API returns any operation. @@ -174,7 +174,7 @@ def create(self, initial_split_keys=None): table_id=self.table_id, ) client = self._instance._client - # We expect a `._generated.bigtable_table_data_pb2.Table` + # We expect a `._generated_v2.table_pb2.Table` client._table_stub.CreateTable(request_pb, client.timeout_seconds) def delete(self): @@ -199,7 +199,7 @@ def list_column_families(self): request_pb = table_admin_messages_v2_pb2.GetTableRequest( name=self.name) client = self._instance._client - # We expect a `._generated.bigtable_table_data_pb2.Table` + # We expect a `._generated_v2.table_pb2.Table` table_pb = client._table_stub.GetTable(request_pb, client.timeout_seconds) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index a318d83fb69d..414e1ae5e171 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -223,7 +223,8 @@ def test_instance_stub_getter(self): client = self._makeOne(project=project, credentials=credentials, admin=True) client._instance_stub_internal = object() - self.assertTrue(client._instance_stub is client._instance_stub_internal) + self.assertTrue( + client._instance_stub is client._instance_stub_internal) def test_instance_stub_non_admin_failure(self): credentials = _Credentials() @@ -551,10 +552,10 @@ def test_list_instances(self): FAILED_LOCATION = 'FAILED' INSTANCE_ID1 = 'instance-id1' INSTANCE_ID2 = 'instance-id2' - INSTANCE_NAME1 = ('projects/' + self.PROJECT + - '/instances/' + INSTANCE_ID1) - INSTANCE_NAME2 = ('projects/' + self.PROJECT + - '/instances/' + INSTANCE_ID2) + INSTANCE_NAME1 = ( + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1) + INSTANCE_NAME2 = ( + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) credentials = _Credentials() client = self._makeOne( diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 6fae4d18c40b..2162212e7fdd 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -28,19 +28,19 @@ def _makeOne(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) timestamp_micros = 18738724000 # Make sure millis granularity timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) value = b'value-bytes' if labels is None: - cell_pb = data_v1_pb2.Cell( + cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros) 
cell_expected = self._makeOne(value, timestamp) else: - cell_pb = data_v1_pb2.Cell( + cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros, labels=labels) cell_expected = self._makeOne(value, timestamp, labels=labels) From 3467d992ed8afa0e33125794b03c4a0e0440e3d3 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 13:09:31 -0400 Subject: [PATCH 050/103] Work around borked gRPC generation for g.longrunning.operations. See reopened #1918. --- gcloud/bigtable/_generated_v2/operations_grpc_pb2.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py index 66491c348817..5723e1d99fe0 100644 --- a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py +++ b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py @@ -1,4 +1,12 @@ - +from google.longrunning.operations_pb2 import ( + CancelOperationRequest, + DeleteOperationRequest, + GetOperationRequest, + ListOperationsRequest, + ListOperationsResponse, + Operation, + google_dot_protobuf_dot_empty__pb2, +) from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality From 784fe01c539a3c2b2aa42617dafd4f2866e3a202 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 13:10:06 -0400 Subject: [PATCH 051/103] Revise Bigtable system tests to fit instance-based patterns. --- system_tests/bigtable.py | 151 ++++++++++++++++++--------------------- 1 file changed, 68 insertions(+), 83 deletions(-) diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index 3259adea6c15..fc5cc3694e50 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -35,9 +35,8 @@ from system_test_utils import unique_resource_id -CENTRAL_1C_ZONE = 'us-central1-c' -CLUSTER_ID = 'gcloud' + unique_resource_id('-') -CLUSTER_ID = CLUSTER_ID[:30] # Cluster IDs can't exceed 30 chars. +INSTANCE_ID = 'gcloud' + unique_resource_id('-') +#INSTANCE_ID = INSTANCE_ID[:30] # Instance IDs can't exceed 30 chars. TABLE_ID = 'gcloud-python-test-table' COLUMN_FAMILY_ID1 = u'col-fam-id1' COLUMN_FAMILY_ID2 = u'col-fam-id2' @@ -50,13 +49,7 @@ CELL_VAL4 = b'foo' ROW_KEY = b'row-key' ROW_KEY_ALT = b'row-key-alt' -EXISTING_CLUSTERS = [] -EXPECTED_ZONES = ( - 'asia-east1-b', - 'europe-west1-c', - 'us-central1-b', - CENTRAL_1C_ZONE, -) +EXISTING_INSTANCES = [] class Config(object): @@ -66,13 +59,13 @@ class Config(object): global state. """ CLIENT = None - CLUSTER = None + INSTANCE = None def _operation_wait(operation, max_attempts=5): """Wait until an operation has completed. - :type operation: :class:`gcloud.bigtable.cluster.Operation` + :type operation: :class:`gcloud.bigtable.instance.Operation` :param operation: Operation that has not finished. 
:type max_attempts: int @@ -92,101 +85,93 @@ def _operation_wait(operation, max_attempts=5): def setUpModule(): _helpers.PROJECT = TESTS_PROJECT Config.CLIENT = Client(admin=True) - Config.CLUSTER = Config.CLIENT.cluster(CENTRAL_1C_ZONE, CLUSTER_ID, - display_name=CLUSTER_ID) + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID) Config.CLIENT.start() - clusters, failed_zones = Config.CLIENT.list_clusters() + instances, failed_locations = Config.CLIENT.list_instances() - if len(failed_zones) != 0: - raise ValueError('List clusters failed in module set up.') + if len(failed_locations) != 0: + raise ValueError('List instances failed in module set up.') - EXISTING_CLUSTERS[:] = clusters + EXISTING_INSTANCES[:] = instances - # After listing, create the test cluster. - created_op = Config.CLUSTER.create() + # After listing, create the test instance. + created_op = Config.INSTANCE.create() if not _operation_wait(created_op): - raise RuntimeError('Cluster creation exceed 5 seconds.') + raise RuntimeError('Instance creation exceed 5 seconds.') def tearDownModule(): - Config.CLUSTER.delete() + Config.INSTANCE.delete() Config.CLIENT.stop() -class TestClusterAdminAPI(unittest2.TestCase): +class TestInstanceAdminAPI(unittest2.TestCase): def setUp(self): - self.clusters_to_delete = [] + self.instances_to_delete = [] def tearDown(self): - for cluster in self.clusters_to_delete: - cluster.delete() - - def test_list_zones(self): - zones = Config.CLIENT.list_zones() - self.assertEqual(sorted(zones), sorted(EXPECTED_ZONES)) - - def test_list_clusters(self): - clusters, failed_zones = Config.CLIENT.list_clusters() - self.assertEqual(failed_zones, []) - # We have added one new cluster in `setUpModule`. - self.assertEqual(len(clusters), len(EXISTING_CLUSTERS) + 1) - for cluster in clusters: - cluster_existence = (cluster in EXISTING_CLUSTERS or - cluster == Config.CLUSTER) - self.assertTrue(cluster_existence) + for instance in self.instances_to_delete: + instance.delete() + + def test_list_instances(self): + instances, failed_locations = Config.CLIENT.list_instances() + self.assertEqual(failed_locations, []) + # We have added one new instance in `setUpModule`. + self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) + for instance in instances: + instance_existence = (instance in EXISTING_INSTANCES or + instance == Config.INSTANCE) + self.assertTrue(instance_existence) def test_reload(self): - # Use same arguments as Config.CLUSTER (created in `setUpModule`) + # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. - cluster = Config.CLIENT.cluster(CENTRAL_1C_ZONE, CLUSTER_ID) + instance = Config.CLIENT.instance(INSTANCE_ID) # Make sure metadata unset before reloading. - cluster.display_name = None - cluster.serve_nodes = None + instance.display_name = None - cluster.reload() - self.assertEqual(cluster.display_name, Config.CLUSTER.display_name) - self.assertEqual(cluster.serve_nodes, Config.CLUSTER.serve_nodes) + instance.reload() + self.assertEqual(instance.display_name, Config.INSTANCE.display_name) - def test_create_cluster(self): - cluster_id = 'new' + unique_resource_id('-') - cluster_id = cluster_id[:30] # Cluster IDs can't exceed 30 chars. - cluster = Config.CLIENT.cluster(CENTRAL_1C_ZONE, cluster_id) - operation = cluster.create() - # Make sure this cluster gets deleted after the test case. 
- self.clusters_to_delete.append(cluster) + def test_create_instance(self): + instance_id = 'new' + unique_resource_id('-') + #instance_id = instance_id[:30] # Instance IDs can't exceed 30 chars. + instance = Config.CLIENT.instance(instance_id) + operation = instance.create() + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) # We want to make sure the operation completes. self.assertTrue(_operation_wait(operation)) - # Create a new cluster instance and make sure it is the same. - cluster_alt = Config.CLIENT.cluster(CENTRAL_1C_ZONE, cluster_id) - cluster_alt.reload() + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(instance_id) + instance_alt.reload() - self.assertEqual(cluster, cluster_alt) - self.assertEqual(cluster.display_name, cluster_alt.display_name) - self.assertEqual(cluster.serve_nodes, cluster_alt.serve_nodes) + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) def test_update(self): - curr_display_name = Config.CLUSTER.display_name - Config.CLUSTER.display_name = 'Foo Bar Baz' - operation = Config.CLUSTER.update() + curr_display_name = Config.INSTANCE.display_name + Config.INSTANCE.display_name = 'Foo Bar Baz' + operation = Config.INSTANCE.update() # We want to make sure the operation completes. self.assertTrue(_operation_wait(operation)) - # Create a new cluster instance and make sure it is the same. - cluster_alt = Config.CLIENT.cluster(CENTRAL_1C_ZONE, CLUSTER_ID) - self.assertNotEqual(cluster_alt.display_name, - Config.CLUSTER.display_name) - cluster_alt.reload() - self.assertEqual(cluster_alt.display_name, - Config.CLUSTER.display_name) + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(INSTANCE_ID) + self.assertNotEqual(instance_alt.display_name, + Config.INSTANCE.display_name) + instance_alt.reload() + self.assertEqual(instance_alt.display_name, + Config.INSTANCE.display_name) - # Make sure to put the cluster back the way it was for the + # Make sure to put the instance back the way it was for the # other test cases. - Config.CLUSTER.display_name = curr_display_name - operation = Config.CLUSTER.update() + Config.INSTANCE.display_name = curr_display_name + operation = Config.INSTANCE.update() # We want to make sure the operation completes. self.assertTrue(_operation_wait(operation)) @@ -196,7 +181,7 @@ class TestTableAdminAPI(unittest2.TestCase): @classmethod def setUpClass(cls): - cls._table = Config.CLUSTER.table(TABLE_ID) + cls._table = Config.INSTANCE.table(TABLE_ID) cls._table.create() @classmethod @@ -211,14 +196,14 @@ def tearDown(self): table.delete() def test_list_tables(self): - # Since `Config.CLUSTER` is newly created in `setUpModule`, the table + # Since `Config.INSTANCE` is newly created in `setUpModule`, the table # created in `setUpClass` here will be the only one. 
- tables = Config.CLUSTER.list_tables() + tables = Config.INSTANCE.list_tables() self.assertEqual(tables, [self._table]) def test_create_table(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -226,15 +211,15 @@ def test_create_table(self): name_attr = operator.attrgetter('name') expected_tables = sorted([temp_table, self._table], key=name_attr) - # Then query for the tables in the cluster and sort them by + # Then query for the tables in the instance and sort them by # name as well. - tables = Config.CLUSTER.list_tables() + tables = Config.INSTANCE.list_tables() sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) def test_create_column_family(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -255,7 +240,7 @@ def test_create_column_family(self): def test_update_column_family(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -278,7 +263,7 @@ def test_update_column_family(self): def test_delete_column_family(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -299,7 +284,7 @@ class TestDataAPI(unittest2.TestCase): @classmethod def setUpClass(cls): - cls._table = table = Config.CLUSTER.table(TABLE_ID) + cls._table = table = Config.INSTANCE.table(TABLE_ID) table.create() table.column_family(COLUMN_FAMILY_ID1).create() table.column_family(COLUMN_FAMILY_ID2).create() From 024c7f30e4971be0f69d5c502260c7ecc8d5cb17 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 13:51:48 -0400 Subject: [PATCH 052/103] Plumb cluster 'location'/'serve_nodes' through instance creation. Note that 'location' is a required argument for new instances: we need it to pass to the automatically-created cluster belonging to a newly-created instance. Non-default 'serve_nodes' can be passed in the automatically-created cluster belonging to a newly-created instance. 
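With the new signature, creating an instance looks roughly like the sketch below (the project,
instance ID, display name and location are placeholders, not values used by the tests in this
patch):

    from gcloud.bigtable.client import Client

    client = Client(admin=True)
    client.start()

    # 'location' names the zone that will host the instance's
    # automatically-created cluster.
    location = 'projects/my-project/locations/us-central1-c'
    instance = client.instance('my-instance', location,
                               display_name='My Instance',
                               serve_nodes=5)

    # create() also creates the same-named cluster at 'location' with
    # 'serve_nodes' nodes, and returns a long-running Operation.
    operation = instance.create()
    operation.finished()  # True once the create has completed

    client.stop()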
--- gcloud/bigtable/client.py | 16 ++- gcloud/bigtable/instance.py | 28 +++++- gcloud/bigtable/test_client.py | 36 ++++++- gcloud/bigtable/test_instance.py | 166 +++++++++++++++++++++++++------ system_tests/bigtable.py | 53 +++++++--- 5 files changed, 242 insertions(+), 57 deletions(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 1405928df98a..82e96ef7a340 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -43,6 +43,7 @@ from gcloud.bigtable._generated_v2 import ( operations_grpc_pb2 as operations_grpc_v2_pb2) +from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES from gcloud.bigtable.instance import Instance from gcloud.client import _ClientFactoryMixin from gcloud.client import _ClientProjectMixin @@ -375,22 +376,33 @@ def __exit__(self, exc_type, exc_val, exc_t): """Stops the client as a context manager.""" self.stop() - def instance(self, instance_id, display_name=None): + def instance(self, instance_id, location, + display_name=None, serve_nodes=DEFAULT_SERVE_NODES): """Factory to create a instance associated with this client. :type instance_id: str :param instance_id: The ID of the instance. + :type location: string + :param location: location name, in form + ``projects//locations/``; used to + set up the instance's cluster. + :type display_name: str :param display_name: (Optional) The display name for the instance in the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. + :rtype: :class:`.Instance` :returns: an instance owned by this client. """ - return Instance(instance_id, self, display_name=display_name) + return Instance(instance_id, self, location, + display_name=display_name, serve_nodes=serve_nodes) def list_instances(self): """List instances owned by the project. diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py index c33d8a64c9e1..a18aaa7e9b26 100644 --- a/gcloud/bigtable/instance.py +++ b/gcloud/bigtable/instance.py @@ -27,9 +27,11 @@ from gcloud.bigtable._generated_v2 import ( bigtable_table_admin_pb2 as table_messages_v2_pb2) from gcloud.bigtable.cluster import Cluster +from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES from gcloud.bigtable.table import Table +_EXISTING_INSTANCE_LOCATION = 'existing instance, location in cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') _OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' @@ -53,13 +55,18 @@ def _prepare_create_request(instance): :returns: The CreateInstance request object containing the instance info. """ parent_name = ('projects/' + instance._client.project) - return messages_v2_pb2.CreateInstanceRequest( + message = messages_v2_pb2.CreateInstanceRequest( parent=parent_name, instance_id=instance.instance_id, instance=data_v2_pb2.Instance( display_name=instance.display_name, ), ) + cluster = message.clusters[instance.instance_id] + cluster.name = instance.name + '/clusters/' + instance.instance_id + cluster.location = instance._cluster_location + cluster.serve_nodes = instance._cluster_serve_nodes + return message def _parse_pb_any_to_native(any_val, expected_type=None): @@ -199,17 +206,28 @@ class Instance(object): :param client: The client that owns the instance. Provides authorization and a project ID. 
+ :type location: string + :param location: location name, in form + ``projects//locations/``; used to + set up the instance's cluster. + :type display_name: str :param display_name: (Optional) The display name for the instance in the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. """ - def __init__(self, instance_id, client, - display_name=None): + def __init__(self, instance_id, client, location, display_name=None, + serve_nodes=DEFAULT_SERVE_NODES): self.instance_id = instance_id self.display_name = display_name or instance_id + self._cluster_location = location + self._cluster_serve_nodes = serve_nodes self._client = client def _update_from_pb(self, instance_pb): @@ -246,8 +264,9 @@ def from_pb(cls, instance_pb, client): if match.group('project') != client.project: raise ValueError('Project ID on instance does not match the ' 'project ID on the client') + instance_id = match.group('instance_id') - result = cls(match.group('instance_id'), client) + result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION) result._update_from_pb(instance_pb) return result @@ -262,6 +281,7 @@ def copy(self): """ new_client = self._client.copy() return self.__class__(self.instance_id, new_client, + self._cluster_location, display_name=self.display_name) @property diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index 414e1ae5e171..0bbc5f35b3f4 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -526,20 +526,47 @@ def test_stop_while_stopped(self): # Make sure the cluster stub did not change. 
self.assertEqual(client._instance_stub_internal, instance_stub) - def test_instance_factory(self): + def test_instance_factory_defaults(self): + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES from gcloud.bigtable.instance import Instance PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' + LOCATION = 'projects/' + PROJECT + '/locations/locname' + credentials = _Credentials() + client = self._makeOne(project=PROJECT, credentials=credentials) + + instance = client.instance(INSTANCE_ID, LOCATION, + display_name=DISPLAY_NAME) + + self.assertTrue(isinstance(instance, Instance)) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance._cluster_location, LOCATION) + self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) + self.assertTrue(instance._client is client) + + def test_instance_factory_w_explicit_serve_nodes(self): + from gcloud.bigtable.instance import Instance + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + LOCATION = 'projects/' + PROJECT + '/locations/locname' + SERVE_NODES = 5 credentials = _Credentials() client = self._makeOne(project=PROJECT, credentials=credentials) - instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) + instance = client.instance( + INSTANCE_ID, display_name=DISPLAY_NAME, + location=LOCATION, serve_nodes=SERVE_NODES) + self.assertTrue(isinstance(instance, Instance)) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance._cluster_location, LOCATION) + self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES) self.assertTrue(instance._client is client) def test_list_instances(self): @@ -549,6 +576,7 @@ def test_list_instances(self): bigtable_instance_admin_pb2 as messages_v2_pb2) from gcloud.bigtable._testing import _FakeStub + LOCATION = 'projects/' + self.PROJECT + '/locations/locname' FAILED_LOCATION = 'FAILED' INSTANCE_ID1 = 'instance-id1' INSTANCE_ID2 = 'instance-id2' @@ -593,8 +621,8 @@ def test_list_instances(self): # Create expected_result. 
failed_locations = [FAILED_LOCATION] instances = [ - client.instance(INSTANCE_ID1), - client.instance(INSTANCE_ID2), + client.instance(INSTANCE_ID1, LOCATION), + client.instance(INSTANCE_ID2, LOCATION), ] expected_result = (instances, failed_locations) diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py index 38e9ef959194..25ef8d64c165 100644 --- a/gcloud/bigtable/test_instance.py +++ b/gcloud/bigtable/test_instance.py @@ -86,11 +86,12 @@ def _finished_helper(self, done): from gcloud.bigtable.instance import Instance PROJECT = 'PROJECT' + LOCATION = 'projects/' + PROJECT + '/locations/locname' INSTANCE_ID = 'instance-id' TIMEOUT_SECONDS = 1 client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS) - instance = Instance(INSTANCE_ID, client) + instance = Instance(INSTANCE_ID, client, LOCATION) operation = self._makeOne( self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) @@ -135,7 +136,8 @@ class TestInstance(unittest2.TestCase): PROJECT = 'project' INSTANCE_ID = 'instance-id' - INSTANCE_NAME = ('projects/' + PROJECT + '/instances/' + INSTANCE_ID) + INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID + LOCATION = 'projects/' + PROJECT + '/locations/locname' DISPLAY_NAME = 'display_name' OP_ID = 8915 OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % @@ -152,18 +154,21 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_constructor_defaults(self): - client = object() + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - instance = self._makeOne(self.INSTANCE_ID, client) + client = object() + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.INSTANCE_ID) self.assertTrue(instance._client is client) + self.assertEqual(instance._cluster_location, self.LOCATION) + self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) def test_constructor_non_default(self): display_name = 'display_name' client = object() - instance = self._makeOne(self.INSTANCE_ID, client, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, display_name=display_name) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, display_name) @@ -173,7 +178,7 @@ def test_copy(self): display_name = 'display_name' client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, display_name=display_name) new_instance = instance.copy() @@ -187,7 +192,7 @@ def test_copy(self): def test_table_factory(self): from gcloud.bigtable.table import Table - instance = self._makeOne(self.INSTANCE_ID, None) + instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION) table = instance.table(self.TABLE_ID) self.assertTrue(isinstance(table, Table)) @@ -203,7 +208,7 @@ def test__update_from_pb_success(self): display_name=display_name, ) - instance = self._makeOne(None, None, None) + instance = self._makeOne(None, None, None, None) self.assertEqual(instance.display_name, None) instance._update_from_pb(instance_pb) self.assertEqual(instance.display_name, display_name) @@ -213,13 +218,14 @@ def test__update_from_pb_no_display_name(self): instance_pb2 as data_v2_pb2) instance_pb = data_v2_pb2.Instance() - instance = self._makeOne(None, None, None) + instance = self._makeOne(None, None, None, None) self.assertEqual(instance.display_name, None) with 
self.assertRaises(ValueError): instance._update_from_pb(instance_pb) self.assertEqual(instance.display_name, None) def test_from_pb_success(self): + from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION from gcloud.bigtable._generated_v2 import ( instance_pb2 as data_v2_pb2) @@ -235,6 +241,8 @@ def test_from_pb_success(self): self.assertTrue(isinstance(instance, klass)) self.assertEqual(instance._client, client) self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance._cluster_location, + _EXISTING_INSTANCE_LOCATION) def test_from_pb_bad_instance_name(self): from gcloud.bigtable._generated_v2 import ( @@ -265,31 +273,31 @@ def test_from_pb_project_mistmatch(self): def test_name_property(self): client = _Client(project=self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) self.assertEqual(instance.name, self.INSTANCE_NAME) def test___eq__(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client) - instance2 = self._makeOne(self.INSTANCE_ID, client) + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) self.assertEqual(instance1, instance2) def test___eq__type_differ(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client) + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) instance2 = object() self.assertNotEqual(instance1, instance2) def test___ne__same_value(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client) - instance2 = self._makeOne(self.INSTANCE_ID, client) + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) comparison_val = (instance1 != instance2) self.assertFalse(comparison_val) def test___ne__(self): - instance1 = self._makeOne('instance_id1', 'client1') - instance2 = self._makeOne('instance_id2', 'client2') + instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION) + instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION) self.assertNotEqual(instance1, instance2) def test_reload(self): @@ -300,7 +308,7 @@ def test_reload(self): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) # Create request_pb request_pb = messages_v2_pb.GetInstanceRequest( @@ -340,7 +348,62 @@ def test_create(self): from gcloud.bigtable import instance as MUT client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + + # Create request_pb. Just a mock since we monkey patch + # _prepare_create_request + request_pb = object() + + # Create response_pb + op_begin = object() + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = MUT.Operation('create', self.OP_ID, op_begin, + instance=instance) + + # Create the mocks. 
+ prep_create_called = [] + + def mock_prep_create_req(instance): + prep_create_called.append(instance) + return request_pb + + process_operation_called = [] + + def mock_process_operation(operation_pb): + process_operation_called.append(operation_pb) + return self.OP_ID, op_begin + + # Perform the method and check the result. + with _Monkey(MUT, + _prepare_create_request=mock_prep_create_req, + _process_operation=mock_process_operation): + result = instance.create() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CreateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + self.assertEqual(prep_create_called, [instance]) + self.assertEqual(process_operation_called, [response_pb]) + + def test_create_w_explicit_serve_nodes(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable import instance as MUT + + SERVE_NODES = 5 + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, + serve_nodes=SERVE_NODES) # Create request_pb. Just a mock since we monkey patch # _prepare_create_request @@ -391,7 +454,7 @@ def test_update(self): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, display_name=self.DISPLAY_NAME) # Create request_pb @@ -426,7 +489,7 @@ def test_delete(self): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) # Create request_pb request_pb = messages_v2_pb.DeleteInstanceRequest( @@ -465,7 +528,7 @@ def test_list_clusters(self): SERVE_NODES = 4 client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) @@ -516,7 +579,7 @@ def _list_tables_helper(self, table_name=None): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) # Create request_ request_pb = table_messages_v1_pb2.ListTablesRequest( @@ -566,32 +629,71 @@ def test_list_tables_failure_name_bad_before(self): class Test__prepare_create_request(unittest2.TestCase): + PROJECT = 'PROJECT' + PARENT = 'projects/' + PROJECT + LOCATION = 'projects/' + PROJECT + '/locations/locname' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID + CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID - def _callFUT(self, instance): + def _callFUT(self, instance, **kw): from gcloud.bigtable.instance import _prepare_create_request - return _prepare_create_request(instance) + return _prepare_create_request(instance, **kw) - def test_it(self): + def test_w_defaults(self): + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES from gcloud.bigtable._generated_v2 import ( instance_pb2 as data_v2_pb2) from gcloud.bigtable._generated_v2 import ( bigtable_instance_admin_pb2 as 
messages_v2_pb) from gcloud.bigtable.instance import Instance - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' + client = _Client(self.PROJECT) + + instance = Instance(self.INSTANCE_ID, client, self.LOCATION) + request_pb = self._callFUT(instance) + self.assertTrue(isinstance(request_pb, + messages_v2_pb.CreateInstanceRequest)) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.parent, self.PARENT) + self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) + self.assertEqual(request_pb.instance.name, u'') + self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID) + + # An instance must also define a same-named cluster + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster)) + self.assertEqual(cluster.name, self.CLUSTER_NAME) + self.assertEqual(cluster.location, self.LOCATION) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + + def test_w_explicit_serve_nodes(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable.instance import Instance DISPLAY_NAME = u'DISPLAY_NAME' - client = _Client(PROJECT) + SERVE_NODES = 5 + client = _Client(self.PROJECT) + instance = Instance(self.INSTANCE_ID, client, self.LOCATION, + display_name=DISPLAY_NAME, + serve_nodes=SERVE_NODES) - instance = Instance(INSTANCE_ID, client, display_name=DISPLAY_NAME) request_pb = self._callFUT(instance) + self.assertTrue(isinstance(request_pb, messages_v2_pb.CreateInstanceRequest)) - self.assertEqual(request_pb.instance_id, INSTANCE_ID) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) self.assertEqual(request_pb.parent, - 'projects/' + PROJECT) + 'projects/' + self.PROJECT) self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) + # An instance must also define a same-named cluster + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster)) + self.assertEqual(cluster.location, self.LOCATION) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) class Test__parse_pb_any_to_native(unittest2.TestCase): diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index fc5cc3694e50..352bf7365d2e 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -14,6 +14,7 @@ import datetime import operator +import os import time import unittest2 @@ -35,8 +36,8 @@ from system_test_utils import unique_resource_id +LOCATION_ID = 'us-central1-c' INSTANCE_ID = 'gcloud' + unique_resource_id('-') -#INSTANCE_ID = INSTANCE_ID[:30] # Instance IDs can't exceed 30 chars. 
TABLE_ID = 'gcloud-python-test-table' COLUMN_FAMILY_ID1 = u'col-fam-id1' COLUMN_FAMILY_ID2 = u'col-fam-id2' @@ -60,6 +61,7 @@ class Config(object): """ CLIENT = None INSTANCE = None + LOCATION_NAME = None def _operation_wait(operation, max_attempts=5): @@ -82,12 +84,31 @@ def _operation_wait(operation, max_attempts=5): return True +def _retry_backoof(meth, *args, **kw): + from grpc.beta.interfaces import StatusCode + from grpc.framework.interfaces.face.face import AbortionError + backoff_intervals = [1, 2, 4, 8] + while True: + try: + return meth(*args, **kw) + except AbortionError as error: + if error.code != StatusCode.UNAVAILABLE: + raise + if backoff_intervals: + time.sleep(backoff_intervals.pop(0)) + else: + raise + + def setUpModule(): _helpers.PROJECT = TESTS_PROJECT + PROJECT = os.getenv(TESTS_PROJECT) + Config.LOCATION_NAME = 'projects/%s/locations/%s' % (PROJECT, LOCATION_ID) Config.CLIENT = Client(admin=True) - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID) + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, Config.LOCATION_NAME) Config.CLIENT.start() - instances, failed_locations = Config.CLIENT.list_instances() + instances, failed_locations = _retry_backoof( + Config.CLIENT.list_instances) if len(failed_locations) != 0: raise ValueError('List instances failed in module set up.') @@ -121,13 +142,13 @@ def test_list_instances(self): self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) for instance in instances: instance_existence = (instance in EXISTING_INSTANCES or - instance == Config.INSTANCE) + instance == Config.INSTANCE) self.assertTrue(instance_existence) def test_reload(self): # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. - instance = Config.CLIENT.instance(INSTANCE_ID) + instance = Config.CLIENT.instance(INSTANCE_ID, Config.LOCATION_NAME) # Make sure metadata unset before reloading. instance.display_name = None @@ -135,9 +156,9 @@ def test_reload(self): self.assertEqual(instance.display_name, Config.INSTANCE.display_name) def test_create_instance(self): - instance_id = 'new' + unique_resource_id('-') - #instance_id = instance_id[:30] # Instance IDs can't exceed 30 chars. - instance = Config.CLIENT.instance(instance_id) + ALT_INSTANCE_ID = 'new' + unique_resource_id('-') + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, Config.LOCATION_NAME) operation = instance.create() # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) @@ -146,31 +167,33 @@ def test_create_instance(self): self.assertTrue(_operation_wait(operation)) # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(instance_id) + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, + Config.LOCATION_NAME) instance_alt.reload() self.assertEqual(instance, instance_alt) self.assertEqual(instance.display_name, instance_alt.display_name) def test_update(self): - curr_display_name = Config.INSTANCE.display_name - Config.INSTANCE.display_name = 'Foo Bar Baz' + CURR_DISPLAY_NAME = Config.INSTANCE.display_name + NEW_DISPLAY_NAME = 'Foo Bar Baz' + Config.INSTANCE.display_name = NEW_DISPLAY_NAME operation = Config.INSTANCE.update() # We want to make sure the operation completes. self.assertTrue(_operation_wait(operation)) # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(INSTANCE_ID) + instance_alt = Config.CLIENT.instance(INSTANCE_ID, + Config.LOCATION_NAME) self.assertNotEqual(instance_alt.display_name, Config.INSTANCE.display_name) instance_alt.reload() - self.assertEqual(instance_alt.display_name, - Config.INSTANCE.display_name) + self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) # Make sure to put the instance back the way it was for the # other test cases. - Config.INSTANCE.display_name = curr_display_name + Config.INSTANCE.display_name = CURR_DISPLAY_NAME operation = Config.INSTANCE.update() # We want to make sure the operation completes. From 5e85b5d0873d45a4daec94eeb9e656d71da5cdea Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 15:22:57 -0400 Subject: [PATCH 053/103] CreateInstance operation name includes location. --- gcloud/bigtable/instance.py | 32 ++++++++++------- gcloud/bigtable/test_instance.py | 59 +++++++++++++++++++------------- 2 files changed, 54 insertions(+), 37 deletions(-) diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py index a18aaa7e9b26..ee539a19acfa 100644 --- a/gcloud/bigtable/instance.py +++ b/gcloud/bigtable/instance.py @@ -35,8 +35,9 @@ _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') _OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' - r'instances/([a-z][-a-z0-9]*)/operations/' - r'(?P\d+)$') + r'instances/([a-z][-a-z0-9]*)/' + r'locations/(?P[a-z][-a-z0-9]*)/' + r'operations/(?P\d+)$') _TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' _ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.' _INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata' @@ -98,25 +99,24 @@ def _process_operation(operation_pb): :param operation_pb: The long-running operation response from a Create/Update/Undelete instance request. - :rtype: tuple - :returns: A pair of an integer and datetime stamp. The integer is the ID - of the operation (``operation_id``) and the timestamp when - the create operation began (``operation_begin``). + :rtype: (int, str, datetime) + :returns: (operation_id, location_id, operation_begin). :raises: :class:`ValueError ` if the operation name doesn't match the :data:`_OPERATION_NAME_RE` regex. """ match = _OPERATION_NAME_RE.match(operation_pb.name) if match is None: raise ValueError('Operation name was not in the expected ' - 'format after a instance modification.', + 'format after instance creation.', operation_pb.name) + location_id = match.group('location_id') operation_id = int(match.group('operation_id')) request_metadata = _parse_pb_any_to_native(operation_pb.metadata) operation_begin = _pb_timestamp_to_datetime( request_metadata.request_time) - return operation_id, operation_begin + return operation_id, location_id, operation_begin class Operation(object): @@ -135,14 +135,18 @@ class Operation(object): :type begin: :class:`datetime.datetime` :param begin: The time when the operation was started. + :type location_id: str + :param location_id: ID of the location in which the operation is running + :type instance: :class:`Instance` :param instance: The instance that created the operation. 
""" - def __init__(self, op_type, op_id, begin, instance=None): + def __init__(self, op_type, op_id, begin, location_id, instance=None): self.op_type = op_type self.op_id = op_id self.begin = begin + self.location_id = location_id self._instance = instance self._complete = False @@ -152,6 +156,7 @@ def __eq__(self, other): return (other.op_type == self.op_type and other.op_id == self.op_id and other.begin == self.begin and + other.location_id == self.location_id and other._instance == self._instance and other._complete == self._complete) @@ -169,8 +174,9 @@ def finished(self): if self._complete: raise ValueError('The operation has completed.') - operation_name = ('operations/' + self._instance.name + - '/operations/%d' % (self.op_id,)) + operation_name = ( + 'operations/%s/locations/%s/operations/%d' % + (self._instance.name, self.location_id, self.op_id)) request_pb = operations_pb2.GetOperationRequest(name=operation_name) # We expect a `google.longrunning.operations_pb2.Operation`. operation_pb = self._instance._client._operations_stub.GetOperation( @@ -352,8 +358,8 @@ def create(self): operation_pb = self._client._instance_stub.CreateInstance( request_pb, self._client.timeout_seconds) - op_id, op_begin = _process_operation(operation_pb) - return Operation('create', op_id, op_begin, instance=self) + op_id, loc_id, op_begin = _process_operation(operation_pb) + return Operation('create', op_id, op_begin, loc_id, instance=self) def update(self): """Update this instance. diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py index 25ef8d64c165..6dd38d66789c 100644 --- a/gcloud/bigtable/test_instance.py +++ b/gcloud/bigtable/test_instance.py @@ -22,6 +22,7 @@ class TestOperation(unittest2.TestCase): OP_TYPE = 'fake-op' OP_ID = 8915 BEGIN = datetime.datetime(2015, 10, 22, 1, 1) + LOCATION_ID = 'loc-id' def _getTargetClass(self): from gcloud.bigtable.instance import Operation @@ -32,11 +33,13 @@ def _makeOne(self, *args, **kwargs): def _constructor_test_helper(self, instance=None): operation = self._makeOne( - self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) self.assertEqual(operation.op_type, self.OP_TYPE) self.assertEqual(operation.op_id, self.OP_ID) self.assertEqual(operation.begin, self.BEGIN) + self.assertEqual(operation.location_id, self.LOCATION_ID) self.assertEqual(operation._instance, instance) self.assertFalse(operation._complete) @@ -50,32 +53,36 @@ def test_constructor_explicit_instance(self): def test___eq__(self): instance = object() operation1 = self._makeOne( - self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) operation2 = self._makeOne( - self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) self.assertEqual(operation1, operation2) def test___eq__type_differ(self): - operation1 = self._makeOne('foo', 123, None) + operation1 = self._makeOne('foo', 123, None, self.LOCATION_ID) operation2 = object() self.assertNotEqual(operation1, operation2) def test___ne__same_value(self): instance = object() operation1 = self._makeOne( - self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) operation2 = self._makeOne( - self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + 
instance=instance) comparison_val = (operation1 != operation2) self.assertFalse(comparison_val) def test___ne__(self): - operation1 = self._makeOne('foo', 123, None) - operation2 = self._makeOne('bar', 456, None) + operation1 = self._makeOne('foo', 123, None, self.LOCATION_ID) + operation2 = self._makeOne('bar', 456, None, self.LOCATION_ID) self.assertNotEqual(operation1, operation2) def test_finished_without_operation(self): - operation = self._makeOne(None, None, None) + operation = self._makeOne(None, None, None, None) operation._complete = True with self.assertRaises(ValueError): operation.finished() @@ -93,11 +100,13 @@ def _finished_helper(self, done): client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS) instance = Instance(INSTANCE_ID, client, LOCATION) operation = self._makeOne( - self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) # Create request_pb op_name = ('operations/projects/' + PROJECT + '/instances/' + INSTANCE_ID + + '/locations/' + self.LOCATION_ID + '/operations/%d' % (self.OP_ID,)) request_pb = operations_pb2.GetOperationRequest(name=op_name) @@ -355,15 +364,15 @@ def test_create(self): request_pb = object() # Create response_pb - op_begin = object() + OP_BEGIN = object() response_pb = operations_pb2.Operation(name=self.OP_NAME) # Patch the stub used by the API method. client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. - expected_result = MUT.Operation('create', self.OP_ID, op_begin, - instance=instance) + expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN, + self.LOCATION, instance=instance) # Create the mocks. prep_create_called = [] @@ -376,7 +385,7 @@ def mock_prep_create_req(instance): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return self.OP_ID, op_begin + return self.OP_ID, self.LOCATION, OP_BEGIN # Perform the method and check the result. with _Monkey(MUT, @@ -410,15 +419,15 @@ def test_create_w_explicit_serve_nodes(self): request_pb = object() # Create response_pb - op_begin = object() + OP_BEGIN = object() response_pb = operations_pb2.Operation(name=self.OP_NAME) # Patch the stub used by the API method. client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. - expected_result = MUT.Operation('create', self.OP_ID, op_begin, - instance=instance) + expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN, + self.LOCATION, instance=instance) # Create the mocks. prep_create_called = [] @@ -431,7 +440,7 @@ def mock_prep_create_req(instance): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return self.OP_ID, op_begin + return self.OP_ID, self.LOCATION, OP_BEGIN # Perform the method and check the result. with _Monkey(MUT, @@ -794,10 +803,11 @@ def test_it(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' - EXPECTED_OPERATION_ID = 234 + LOCATION_ID = 'location' + OP_ID = 234 OPERATION_NAME = ( - 'operations/projects/%s/instances/%s/operations/%d' % - (PROJECT, INSTANCE_ID, EXPECTED_OPERATION_ID)) + 'operations/projects/%s/instances/%s/locations/%s/operations/%d' % + (PROJECT, INSTANCE_ID, LOCATION_ID, OP_ID)) current_op = operations_pb2.Operation(name=OPERATION_NAME) @@ -819,11 +829,12 @@ def mock_pb_timestamp_to_datetime(timestamp): # Exectute method with mocks in place. 
with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): - operation_id, operation_begin = self._callFUT(current_op) + op_id, loc_id, op_begin = self._callFUT(current_op) # Check outputs. - self.assertEqual(operation_id, EXPECTED_OPERATION_ID) - self.assertTrue(operation_begin is expected_operation_begin) + self.assertEqual(op_id, OP_ID) + self.assertTrue(op_begin is expected_operation_begin) + self.assertEqual(loc_id, LOCATION_ID) # Check mocks were used correctly. self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) From d2c9dafd99b60ce923a4772721780127d188ecb1 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 15:48:56 -0400 Subject: [PATCH 054/103] 'Row/ColumnFamily._table' has '_instance'. --- gcloud/bigtable/column_family.py | 6 +++--- gcloud/bigtable/row.py | 6 +++--- gcloud/bigtable/test_column_family.py | 4 ++-- gcloud/bigtable/test_row.py | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/gcloud/bigtable/column_family.py b/gcloud/bigtable/column_family.py index 9088e24a72ab..10127aa74961 100644 --- a/gcloud/bigtable/column_family.py +++ b/gcloud/bigtable/column_family.py @@ -261,7 +261,7 @@ def create(self): id=self.column_family_id, create=column_family, ) - client = self._table._cluster._client + client = self._table._instance._client # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. @@ -286,7 +286,7 @@ def update(self): request_pb.modifications.add( id=self.column_family_id, update=column_family) - client = self._table._cluster._client + client = self._table._instance._client # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. 
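For reference, the location-qualified operation name introduced by "CreateInstance operation name includes location" can be parsed as shown below. This is a minimal standalone sketch, not part of the patch; the group names and sample IDs are illustrative:

    import re

    # Mirrors the segments of the updated _OPERATION_NAME_RE: a locations/
    # component now sits between the instance and the operation ID.
    OP_NAME_RE = re.compile(
        r'^operations/projects/(?P<project>[^/]+)/'
        r'instances/(?P<instance>[a-z][-a-z0-9]*)/'
        r'locations/(?P<location_id>[a-z][-a-z0-9]*)/'
        r'operations/(?P<operation_id>\d+)$')

    name = ('operations/projects/my-project/instances/my-instance/'
            'locations/us-central1-c/operations/1234')
    match = OP_NAME_RE.match(name)
    assert match.group('location_id') == 'us-central1-c'
    assert int(match.group('operation_id')) == 1234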
@@ -300,7 +300,7 @@ def delete(self): request_pb.modifications.add( id=self.column_family_id, drop=True) - client = self._table._cluster._client + client = self._table._instance._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.ModifyColumnFamilies(request_pb, client.timeout_seconds) diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py index 07cf9e1abec4..845747d41923 100644 --- a/gcloud/bigtable/row.py +++ b/gcloud/bigtable/row.py @@ -395,7 +395,7 @@ def commit(self): mutations=mutations_list, ) # We expect a `google.protobuf.empty_pb2.Empty` - client = self._table._cluster._client + client = self._table._instance._client client._data_stub.MutateRow(request_pb, client.timeout_seconds) self.clear() @@ -512,7 +512,7 @@ def commit(self): false_mutations=false_mutations, ) # We expect a `.messages_v2_pb2.CheckAndMutateRowResponse` - client = self._table._cluster._client + client = self._table._instance._client resp = client._data_stub.CheckAndMutateRow( request_pb, client.timeout_seconds) self.clear() @@ -800,7 +800,7 @@ def commit(self): rules=self._rule_pb_list, ) # We expect a `.data_v2_pb2.Row` - client = self._table._cluster._client + client = self._table._instance._client row_response = client._data_stub.ReadModifyWriteRow( request_pb, client.timeout_seconds) diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py index 77d3f7bfdfd7..d9deaf841fa0 100644 --- a/gcloud/bigtable/test_column_family.py +++ b/gcloud/bigtable/test_column_family.py @@ -650,7 +650,7 @@ def _ColumnFamilyPB(*args, **kw): return table_v2_pb2.ColumnFamily(*args, **kw) -class _Cluster(object): +class _Instance(object): def __init__(self, client=None): self._client = client @@ -666,4 +666,4 @@ class _Table(object): def __init__(self, name, client=None): self.name = name - self._cluster = _Cluster(client) + self._instance = _Instance(client) diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py index 2cc7630758d2..b5f486cbec0c 100644 --- a/gcloud/bigtable/test_row.py +++ b/gcloud/bigtable/test_row.py @@ -895,7 +895,7 @@ def __init__(self, timeout_seconds=None): self.timeout_seconds = timeout_seconds -class _Cluster(object): +class _Instance(object): def __init__(self, client=None): self._client = client @@ -905,4 +905,4 @@ class _Table(object): def __init__(self, name, client=None): self.name = name - self._cluster = _Cluster(client) + self._instance = _Instance(client) From a01d02e768816284e8f33baac0a8afbb5d19997c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 15:49:38 -0400 Subject: [PATCH 055/103] V2 UpdateTable no longer returns operation. --- system_tests/bigtable.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index 352bf7365d2e..c9867f480b98 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -175,29 +175,21 @@ def test_create_instance(self): self.assertEqual(instance.display_name, instance_alt.display_name) def test_update(self): - CURR_DISPLAY_NAME = Config.INSTANCE.display_name + OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = 'Foo Bar Baz' Config.INSTANCE.display_name = NEW_DISPLAY_NAME - operation = Config.INSTANCE.update() + Config.INSTANCE.update() - # We want to make sure the operation completes. - self.assertTrue(_operation_wait(operation)) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(INSTANCE_ID, - Config.LOCATION_NAME) - self.assertNotEqual(instance_alt.display_name, - Config.INSTANCE.display_name) + # Create a new instance instance and reload it. + instance_alt = Config.CLIENT.instance(INSTANCE_ID, None) + self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME) instance_alt.reload() self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) # Make sure to put the instance back the way it was for the # other test cases. - Config.INSTANCE.display_name = CURR_DISPLAY_NAME - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - self.assertTrue(_operation_wait(operation)) + Config.INSTANCE.display_name = OLD_DISPLAY_NAME + Config.INSTANCE.update() class TestTableAdminAPI(unittest2.TestCase): From 9bdeef7d6fb29ab76d55b946231dcb858feb0e91 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 16:00:50 -0400 Subject: [PATCH 056/103] 'PartialRowData' no longer keeps 'committed' state. The 'PartialRowsData' has state: 'Table.read_row()' ensures it is in the correct state before emitting the PRD. --- system_tests/bigtable.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index c9867f480b98..7db32e777602 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -357,7 +357,6 @@ def test_read_row(self): # Read back the contents of the row. partial_row_data = self._table.read_row(ROW_KEY) - self.assertTrue(partial_row_data.committed) self.assertEqual(partial_row_data.row_key, ROW_KEY) # Check the cells match. @@ -440,7 +439,6 @@ def test_read_with_label_applied(self): # Bring our two labeled columns together. row_filter = RowFilterUnion(filters=[chain1, chain2]) partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) - self.assertTrue(partial_row_data.committed) self.assertEqual(partial_row_data.row_key, ROW_KEY) cells_returned = partial_row_data.cells From 893d4518010bffe97db2624f064452148bad4dd2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 16:31:39 -0400 Subject: [PATCH 057/103] Typo fix. Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1930#discussion_r68834783 --- system_tests/bigtable.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index 7db32e777602..735d5707f8aa 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -84,7 +84,7 @@ def _operation_wait(operation, max_attempts=5): return True -def _retry_backoof(meth, *args, **kw): +def _retry_backoff(meth, *args, **kw): from grpc.beta.interfaces import StatusCode from grpc.framework.interfaces.face.face import AbortionError backoff_intervals = [1, 2, 4, 8] @@ -107,7 +107,7 @@ def setUpModule(): Config.CLIENT = Client(admin=True) Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, Config.LOCATION_NAME) Config.CLIENT.start() - instances, failed_locations = _retry_backoof( + instances, failed_locations = _retry_backoff( Config.CLIENT.list_instances) if len(failed_locations) != 0: From d8a01c2dcb56d1edd5f826cb2cacc9bb6d697f14 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 16:52:48 -0400 Subject: [PATCH 058/103] Switch to passing location ID, rather than full path, to instance. It can synthesize the path using its client's project ID. 
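Put differently, callers now hand over only the short location ID, and the full resource path is synthesized from the client's project when the create request is prepared. A minimal sketch of that synthesis (the project and location values are illustrative):

    # The instance stores just the location ID; the cluster's full location
    # path is rebuilt from the client's project at request time.
    project = 'my-project'
    location_id = 'us-central1-c'
    parent_name = 'projects/' + project
    location_name = parent_name + '/locations/' + location_id
    assert location_name == 'projects/my-project/locations/us-central1-c'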
Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1930#discussion_r68834504 --- gcloud/bigtable/client.py | 3 +- gcloud/bigtable/instance.py | 23 ++++++----- gcloud/bigtable/test_client.py | 15 +++---- gcloud/bigtable/test_instance.py | 71 ++++++++++++++++---------------- system_tests/bigtable.py | 12 ++---- 5 files changed, 63 insertions(+), 61 deletions(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 82e96ef7a340..cf25d05f2a0d 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -45,6 +45,7 @@ from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES from gcloud.bigtable.instance import Instance +from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID from gcloud.client import _ClientFactoryMixin from gcloud.client import _ClientProjectMixin from gcloud.credentials import get_credentials @@ -376,7 +377,7 @@ def __exit__(self, exc_type, exc_val, exc_t): """Stops the client as a context manager.""" self.stop() - def instance(self, instance_id, location, + def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, display_name=None, serve_nodes=DEFAULT_SERVE_NODES): """Factory to create a instance associated with this client. diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py index ee539a19acfa..dec6c9029744 100644 --- a/gcloud/bigtable/instance.py +++ b/gcloud/bigtable/instance.py @@ -31,7 +31,7 @@ from gcloud.bigtable.table import Table -_EXISTING_INSTANCE_LOCATION = 'existing instance, location in cluster' +_EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') _OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' @@ -65,7 +65,8 @@ def _prepare_create_request(instance): ) cluster = message.clusters[instance.instance_id] cluster.name = instance.name + '/clusters/' + instance.instance_id - cluster.location = instance._cluster_location + cluster.location = ( + parent_name + '/locations/' + instance._cluster_location_id) cluster.serve_nodes = instance._cluster_serve_nodes return message @@ -212,10 +213,10 @@ class Instance(object): :param client: The client that owns the instance. Provides authorization and a project ID. - :type location: string - :param location: location name, in form - ``projects//locations/``; used to - set up the instance's cluster. + :type location_id: str + :param location_id: ID of the location in which the instance will be + created. Required for instances which do not yet + exist. :type display_name: str :param display_name: (Optional) The display name for the instance in the @@ -228,11 +229,13 @@ class Instance(object): cluster; used to set up the instance's cluster. 
""" - def __init__(self, instance_id, client, location, display_name=None, + def __init__(self, instance_id, client, + location_id=_EXISTING_INSTANCE_LOCATION_ID, + display_name=None, serve_nodes=DEFAULT_SERVE_NODES): self.instance_id = instance_id self.display_name = display_name or instance_id - self._cluster_location = location + self._cluster_location_id = location_id self._cluster_serve_nodes = serve_nodes self._client = client @@ -272,7 +275,7 @@ def from_pb(cls, instance_pb, client): 'project ID on the client') instance_id = match.group('instance_id') - result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION) + result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID) result._update_from_pb(instance_pb) return result @@ -287,7 +290,7 @@ def copy(self): """ new_client = self._client.copy() return self.__class__(self.instance_id, new_client, - self._cluster_location, + self._cluster_location_id, display_name=self.display_name) @property diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index 0bbc5f35b3f4..dd58af08e025 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -529,21 +529,22 @@ def test_stop_while_stopped(self): def test_instance_factory_defaults(self): from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES from gcloud.bigtable.instance import Instance + from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - LOCATION = 'projects/' + PROJECT + '/locations/locname' + LOCATION_ID = 'locname' credentials = _Credentials() client = self._makeOne(project=PROJECT, credentials=credentials) - instance = client.instance(INSTANCE_ID, LOCATION, - display_name=DISPLAY_NAME) + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) self.assertTrue(isinstance(instance, Instance)) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance._cluster_location, LOCATION) + self.assertEqual(instance._cluster_location_id, + _EXISTING_INSTANCE_LOCATION_ID) self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) self.assertTrue(instance._client is client) @@ -553,19 +554,19 @@ def test_instance_factory_w_explicit_serve_nodes(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - LOCATION = 'projects/' + PROJECT + '/locations/locname' + LOCATION_ID = 'locname' SERVE_NODES = 5 credentials = _Credentials() client = self._makeOne(project=PROJECT, credentials=credentials) instance = client.instance( INSTANCE_ID, display_name=DISPLAY_NAME, - location=LOCATION, serve_nodes=SERVE_NODES) + location=LOCATION_ID, serve_nodes=SERVE_NODES) self.assertTrue(isinstance(instance, Instance)) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance._cluster_location, LOCATION) + self.assertEqual(instance._cluster_location_id, LOCATION_ID) self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES) self.assertTrue(instance._client is client) diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py index 6dd38d66789c..da8827685292 100644 --- a/gcloud/bigtable/test_instance.py +++ b/gcloud/bigtable/test_instance.py @@ -93,12 +93,11 @@ def _finished_helper(self, done): from gcloud.bigtable.instance import Instance PROJECT = 'PROJECT' - LOCATION = 'projects/' + PROJECT + '/locations/locname' INSTANCE_ID = 'instance-id' 
TIMEOUT_SECONDS = 1 client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS) - instance = Instance(INSTANCE_ID, client, LOCATION) + instance = Instance(INSTANCE_ID, client, self.LOCATION_ID) operation = self._makeOne( self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, instance=instance) @@ -146,7 +145,8 @@ class TestInstance(unittest2.TestCase): PROJECT = 'project' INSTANCE_ID = 'instance-id' INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID - LOCATION = 'projects/' + PROJECT + '/locations/locname' + LOCATION_ID = 'locname' + LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID DISPLAY_NAME = 'display_name' OP_ID = 8915 OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % @@ -166,18 +166,18 @@ def test_constructor_defaults(self): from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES client = object() - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.INSTANCE_ID) self.assertTrue(instance._client is client) - self.assertEqual(instance._cluster_location, self.LOCATION) + self.assertEqual(instance._cluster_location_id, self.LOCATION_ID) self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) def test_constructor_non_default(self): display_name = 'display_name' client = object() - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=display_name) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, display_name) @@ -187,7 +187,7 @@ def test_copy(self): display_name = 'display_name' client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=display_name) new_instance = instance.copy() @@ -201,7 +201,7 @@ def test_copy(self): def test_table_factory(self): from gcloud.bigtable.table import Table - instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION_ID) table = instance.table(self.TABLE_ID) self.assertTrue(isinstance(table, Table)) @@ -234,7 +234,7 @@ def test__update_from_pb_no_display_name(self): self.assertEqual(instance.display_name, None) def test_from_pb_success(self): - from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION + from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID from gcloud.bigtable._generated_v2 import ( instance_pb2 as data_v2_pb2) @@ -250,8 +250,8 @@ def test_from_pb_success(self): self.assertTrue(isinstance(instance, klass)) self.assertEqual(instance._client, client) self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance._cluster_location, - _EXISTING_INSTANCE_LOCATION) + self.assertEqual(instance._cluster_location_id, + _EXISTING_INSTANCE_LOCATION_ID) def test_from_pb_bad_instance_name(self): from gcloud.bigtable._generated_v2 import ( @@ -282,31 +282,31 @@ def test_from_pb_project_mistmatch(self): def test_name_property(self): client = _Client(project=self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance.name, self.INSTANCE_NAME) def test___eq__(self): client = object() - instance1 = 
self._makeOne(self.INSTANCE_ID, client, self.LOCATION) - instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance1, instance2) def test___eq__type_differ(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) instance2 = object() self.assertNotEqual(instance1, instance2) def test___ne__same_value(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) - instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) comparison_val = (instance1 != instance2) self.assertFalse(comparison_val) def test___ne__(self): - instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION) - instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION) + instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION_ID) + instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION_ID) self.assertNotEqual(instance1, instance2) def test_reload(self): @@ -317,7 +317,7 @@ def test_reload(self): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_pb request_pb = messages_v2_pb.GetInstanceRequest( @@ -357,7 +357,7 @@ def test_create(self): from gcloud.bigtable import instance as MUT client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_pb. Just a mock since we monkey patch # _prepare_create_request @@ -372,7 +372,7 @@ def test_create(self): # Create expected_result. expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN, - self.LOCATION, instance=instance) + self.LOCATION_ID, instance=instance) # Create the mocks. prep_create_called = [] @@ -385,7 +385,7 @@ def mock_prep_create_req(instance): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return self.OP_ID, self.LOCATION, OP_BEGIN + return self.OP_ID, self.LOCATION_ID, OP_BEGIN # Perform the method and check the result. with _Monkey(MUT, @@ -411,7 +411,7 @@ def test_create_w_explicit_serve_nodes(self): SERVE_NODES = 5 client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, serve_nodes=SERVE_NODES) # Create request_pb. Just a mock since we monkey patch @@ -427,7 +427,7 @@ def test_create_w_explicit_serve_nodes(self): # Create expected_result. expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN, - self.LOCATION, instance=instance) + self.LOCATION_ID, instance=instance) # Create the mocks. 
prep_create_called = [] @@ -440,7 +440,7 @@ def mock_prep_create_req(instance): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return self.OP_ID, self.LOCATION, OP_BEGIN + return self.OP_ID, self.LOCATION_ID, OP_BEGIN # Perform the method and check the result. with _Monkey(MUT, @@ -463,7 +463,7 @@ def test_update(self): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION, + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) # Create request_pb @@ -498,7 +498,7 @@ def test_delete(self): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_pb request_pb = messages_v2_pb.DeleteInstanceRequest( @@ -537,7 +537,7 @@ def test_list_clusters(self): SERVE_NODES = 4 client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) @@ -588,7 +588,7 @@ def _list_tables_helper(self, table_name=None): from gcloud.bigtable._testing import _FakeStub client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_ request_pb = table_messages_v1_pb2.ListTablesRequest( @@ -640,7 +640,8 @@ def test_list_tables_failure_name_bad_before(self): class Test__prepare_create_request(unittest2.TestCase): PROJECT = 'PROJECT' PARENT = 'projects/' + PROJECT - LOCATION = 'projects/' + PROJECT + '/locations/locname' + LOCATION_ID = 'locname' + LOCATION_NAME = 'projects/' + PROJECT + '/locations/' + LOCATION_ID INSTANCE_ID = 'instance-id' INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID @@ -659,7 +660,7 @@ def test_w_defaults(self): client = _Client(self.PROJECT) - instance = Instance(self.INSTANCE_ID, client, self.LOCATION) + instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID) request_pb = self._callFUT(instance) self.assertTrue(isinstance(request_pb, messages_v2_pb.CreateInstanceRequest)) @@ -673,7 +674,7 @@ def test_w_defaults(self): cluster = request_pb.clusters[self.INSTANCE_ID] self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster)) self.assertEqual(cluster.name, self.CLUSTER_NAME) - self.assertEqual(cluster.location, self.LOCATION) + self.assertEqual(cluster.location, self.LOCATION_NAME) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_w_explicit_serve_nodes(self): @@ -685,7 +686,7 @@ def test_w_explicit_serve_nodes(self): DISPLAY_NAME = u'DISPLAY_NAME' SERVE_NODES = 5 client = _Client(self.PROJECT) - instance = Instance(self.INSTANCE_ID, client, self.LOCATION, + instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=DISPLAY_NAME, serve_nodes=SERVE_NODES) @@ -701,7 +702,7 @@ def test_w_explicit_serve_nodes(self): # An instance must also define a same-named cluster cluster = request_pb.clusters[self.INSTANCE_ID] self.assertTrue(isinstance(cluster, 
data_v2_pb2.Cluster)) - self.assertEqual(cluster.location, self.LOCATION) + self.assertEqual(cluster.location, self.LOCATION_NAME) self.assertEqual(cluster.serve_nodes, SERVE_NODES) diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index 735d5707f8aa..c7e54016534a 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -61,7 +61,6 @@ class Config(object): """ CLIENT = None INSTANCE = None - LOCATION_NAME = None def _operation_wait(operation, max_attempts=5): @@ -103,9 +102,8 @@ def _retry_backoff(meth, *args, **kw): def setUpModule(): _helpers.PROJECT = TESTS_PROJECT PROJECT = os.getenv(TESTS_PROJECT) - Config.LOCATION_NAME = 'projects/%s/locations/%s' % (PROJECT, LOCATION_ID) Config.CLIENT = Client(admin=True) - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, Config.LOCATION_NAME) + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) Config.CLIENT.start() instances, failed_locations = _retry_backoff( Config.CLIENT.list_instances) @@ -148,7 +146,7 @@ def test_list_instances(self): def test_reload(self): # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. - instance = Config.CLIENT.instance(INSTANCE_ID, Config.LOCATION_NAME) + instance = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) # Make sure metadata unset before reloading. instance.display_name = None @@ -157,8 +155,7 @@ def test_reload(self): def test_create_instance(self): ALT_INSTANCE_ID = 'new' + unique_resource_id('-') - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, Config.LOCATION_NAME) + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) operation = instance.create() # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) @@ -167,8 +164,7 @@ def test_create_instance(self): self.assertTrue(_operation_wait(operation)) # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, - Config.LOCATION_NAME) + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) instance_alt.reload() self.assertEqual(instance, instance_alt) From 6f4acedfe7af96f5d7893d2daad9aa23362e9b58 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 18:37:13 -0400 Subject: [PATCH 059/103] Lint fixes. 
--- gcloud/bigtable/test_client.py | 1 - system_tests/bigtable.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index dd58af08e025..435798ecdf61 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -534,7 +534,6 @@ def test_instance_factory_defaults(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - LOCATION_ID = 'locname' credentials = _Credentials() client = self._makeOne(project=PROJECT, credentials=credentials) diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index c7e54016534a..6933bc60847c 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -14,7 +14,6 @@ import datetime import operator -import os import time import unittest2 @@ -101,7 +100,6 @@ def _retry_backoff(meth, *args, **kw): def setUpModule(): _helpers.PROJECT = TESTS_PROJECT - PROJECT = os.getenv(TESTS_PROJECT) Config.CLIENT = Client(admin=True) Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) Config.CLIENT.start() From 82adc5eb62798b7d75ad2380c550a9b86ff0b9fc Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 17:15:12 -0400 Subject: [PATCH 060/103] Adjust to V2 'instance'-based patterns. --- gcloud/bigtable/happybase/__init__.py | 10 +- gcloud/bigtable/happybase/connection.py | 76 +++---- gcloud/bigtable/happybase/pool.py | 8 +- gcloud/bigtable/happybase/table.py | 4 +- gcloud/bigtable/happybase/test_connection.py | 228 +++++++++---------- gcloud/bigtable/happybase/test_pool.py | 68 +++--- gcloud/bigtable/happybase/test_table.py | 10 +- system_tests/bigtable_happybase.py | 15 +- 8 files changed, 209 insertions(+), 210 deletions(-) diff --git a/gcloud/bigtable/happybase/__init__.py b/gcloud/bigtable/happybase/__init__.py index 30955b8be936..076a28d7c4d4 100644 --- a/gcloud/bigtable/happybase/__init__.py +++ b/gcloud/bigtable/happybase/__init__.py @@ -84,9 +84,9 @@ * ``protocol`` * In order to make :class:`Connection ` - compatible with Cloud Bigtable, we add a ``cluster`` keyword argument to + compatible with Cloud Bigtable, we add a ``instance`` keyword argument to allow users to pass in their own - :class:`Cluster ` (which they can + :class:`Instance ` (which they can construct beforehand). For example: @@ -95,11 +95,11 @@ from gcloud.bigtable.client import Client client = Client(project=PROJECT_ID, admin=True) - cluster = client.cluster(zone, cluster_id) - cluster.reload() + instance = client.instance(instance_id, location_id) + instance.reload() from gcloud.bigtable.happybase import Connection - connection = Connection(cluster=cluster) + connection = Connection(instance=instance) * Any uses of the ``wal`` (Write Ahead Log) argument will result in a warning as well. This includes uses in: diff --git a/gcloud/bigtable/happybase/connection.py b/gcloud/bigtable/happybase/connection.py index bc57b3429eed..ebea84e93998 100644 --- a/gcloud/bigtable/happybase/connection.py +++ b/gcloud/bigtable/happybase/connection.py @@ -54,25 +54,25 @@ 'of enabled / disabled tables.') -def _get_cluster(timeout=None): - """Gets cluster for the default project. +def _get_instance(timeout=None): + """Gets instance for the default project. Creates a client with the inferred credentials and project ID from the local environment. Then uses - :meth:`.bigtable.client.Client.list_clusters` to - get the unique cluster owned by the project. + :meth:`.bigtable.client.Client.list_instances` to + get the unique instance owned by the project. 
- If the request fails for any reason, or if there isn't exactly one cluster + If the request fails for any reason, or if there isn't exactly one instance owned by the project, then this function will fail. :type timeout: int :param timeout: (Optional) The socket timeout in milliseconds. - :rtype: :class:`gcloud.bigtable.cluster.Cluster` - :returns: The unique cluster owned by the project inferred from + :rtype: :class:`gcloud.bigtable.instance.Instance` + :returns: The unique instance owned by the project inferred from the environment. :raises: :class:`ValueError ` if there is a failed - zone or any number of clusters other than one. + location or any number of instances other than one. """ client_kwargs = {'admin': True} if timeout is not None: @@ -80,20 +80,20 @@ def _get_cluster(timeout=None): client = Client(**client_kwargs) try: client.start() - clusters, failed_zones = client.list_clusters() + instances, failed_locations = client.list_instances() finally: client.stop() - if len(failed_zones) != 0: - raise ValueError('Determining cluster via ListClusters encountered ' - 'failed zones.') - if len(clusters) == 0: - raise ValueError('This client doesn\'t have access to any clusters.') - if len(clusters) > 1: - raise ValueError('This client has access to more than one cluster. ' - 'Please directly pass the cluster you\'d ' + if len(failed_locations) != 0: + raise ValueError('Determining instance via ListInstances encountered ' + 'failed locations.') + if len(instances) == 0: + raise ValueError('This client doesn\'t have access to any instances.') + if len(instances) > 1: + raise ValueError('This client has access to more than one instance. ' + 'Please directly pass the instance you\'d ' 'like to use.') - return clusters[0] + return instances[0] class Connection(object): @@ -101,10 +101,10 @@ class Connection(object): .. note:: - If you pass a ``cluster``, it will be :meth:`.Cluster.copy`-ed before + If you pass a ``instance``, it will be :meth:`.Instance.copy`-ed before being stored on the new connection. This also copies the :class:`Client ` that created the - :class:`Cluster ` instance and the + :class:`Instance ` instance and the :class:`Credentials ` stored on the client. @@ -127,8 +127,8 @@ class Connection(object): :param table_prefix_separator: (Optional) Separator used with ``table_prefix``. Defaults to ``_``. - :type cluster: :class:`Cluster ` - :param cluster: (Optional) A Cloud Bigtable cluster. The instance also + :type instance: :class:`Instance ` + :param instance: (Optional) A Cloud Bigtable instance. The instance also owns a client for making gRPC requests to the Cloud Bigtable API. If not passed in, defaults to creating client with ``admin=True`` and using the ``timeout`` here for the @@ -136,7 +136,7 @@ class Connection(object): :class:`Client ` constructor. The credentials for the client will be the implicit ones loaded from the environment. - Then that client is used to retrieve all the clusters + Then that client is used to retrieve all the instances owned by the client's project. :type kwargs: dict @@ -144,10 +144,10 @@ class Connection(object): compatibility. 
""" - _cluster = None + _instance = None def __init__(self, timeout=None, autoconnect=True, table_prefix=None, - table_prefix_separator='_', cluster=None, **kwargs): + table_prefix_separator='_', instance=None, **kwargs): self._handle_legacy_args(kwargs) if table_prefix is not None: if not isinstance(table_prefix, six.string_types): @@ -162,13 +162,13 @@ def __init__(self, timeout=None, autoconnect=True, table_prefix=None, self.table_prefix = table_prefix self.table_prefix_separator = table_prefix_separator - if cluster is None: - self._cluster = _get_cluster(timeout=timeout) + if instance is None: + self._instance = _get_instance(timeout=timeout) else: if timeout is not None: raise ValueError('Timeout cannot be used when an existing ' - 'cluster is passed') - self._cluster = cluster.copy() + 'instance is passed') + self._instance = instance.copy() if autoconnect: self.open() @@ -203,23 +203,23 @@ def open(self): This method opens the underlying HTTP/2 gRPC connection using a :class:`Client ` bound to the - :class:`Cluster ` owned by + :class:`Instance ` owned by this connection. """ - self._cluster._client.start() + self._instance._client.start() def close(self): """Close the underlying transport to Cloud Bigtable. This method closes the underlying HTTP/2 gRPC connection using a :class:`Client ` bound to the - :class:`Cluster ` owned by + :class:`Instance ` owned by this connection. """ - self._cluster._client.stop() + self._instance._client.stop() def __del__(self): - if self._cluster is not None: + if self._instance is not None: self.close() def _table_name(self, name): @@ -258,7 +258,7 @@ def tables(self): .. note:: - This lists every table in the cluster owned by this connection, + This lists every table in the instance owned by this connection, **not** every table that a given user may have access to. .. note:: @@ -269,7 +269,7 @@ def tables(self): :rtype: list :returns: List of string table names. """ - low_level_table_instances = self._cluster.list_tables() + low_level_table_instances = self._instance.list_tables() table_names = [table_instance.table_id for table_instance in low_level_table_instances] @@ -345,7 +345,7 @@ def create_table(self, name, families): # Create table instance and then make API calls. name = self._table_name(name) - low_level_table = _LowLevelTable(name, self._cluster) + low_level_table = _LowLevelTable(name, self._instance) try: low_level_table.create() except face.NetworkError as network_err: @@ -376,7 +376,7 @@ def delete_table(self, name, disable=False): _WARN(_DISABLE_DELETE_MSG) name = self._table_name(name) - _LowLevelTable(name, self._cluster).delete() + _LowLevelTable(name, self._instance).delete() def enable_table(self, name): """Enable the specified table. diff --git a/gcloud/bigtable/happybase/pool.py b/gcloud/bigtable/happybase/pool.py index ab84724740a2..1ed22cdd6c84 100644 --- a/gcloud/bigtable/happybase/pool.py +++ b/gcloud/bigtable/happybase/pool.py @@ -21,7 +21,7 @@ import six from gcloud.bigtable.happybase.connection import Connection -from gcloud.bigtable.happybase.connection import _get_cluster +from gcloud.bigtable.happybase.connection import _get_instance _MIN_POOL_SIZE = 1 @@ -45,7 +45,7 @@ class ConnectionPool(object): :class:`Connection <.happybase.connection.Connection>` constructor **except** for ``autoconnect``. This is because the ``open`` / ``closed`` status of a connection is managed by the pool. 
In addition, - if ``cluster`` is not passed, the default / inferred cluster is + if ``instance`` is not passed, the default / inferred instance is determined by the pool and then passed to each :class:`Connection <.happybase.connection.Connection>` that is created. @@ -75,8 +75,8 @@ def __init__(self, size, **kwargs): connection_kwargs = kwargs connection_kwargs['autoconnect'] = False - if 'cluster' not in connection_kwargs: - connection_kwargs['cluster'] = _get_cluster( + if 'instance' not in connection_kwargs: + connection_kwargs['instance'] = _get_instance( timeout=kwargs.get('timeout')) for _ in six.moves.range(size): diff --git a/gcloud/bigtable/happybase/table.py b/gcloud/bigtable/happybase/table.py index 3f87f953c026..8aea3ca42241 100644 --- a/gcloud/bigtable/happybase/table.py +++ b/gcloud/bigtable/happybase/table.py @@ -109,13 +109,13 @@ class Table(object): def __init__(self, name, connection): self.name = name - # This remains as legacy for HappyBase, but only the cluster + # This remains as legacy for HappyBase, but only the instance # from the connection is needed. self.connection = connection self._low_level_table = None if self.connection is not None: self._low_level_table = _LowLevelTable(self.name, - self.connection._cluster) + self.connection._instance) def __repr__(self): return '' % (self.name,) diff --git a/gcloud/bigtable/happybase/test_connection.py b/gcloud/bigtable/happybase/test_connection.py index 2c96b9d1721b..39dea64fbf5f 100644 --- a/gcloud/bigtable/happybase/test_connection.py +++ b/gcloud/bigtable/happybase/test_connection.py @@ -18,27 +18,27 @@ import unittest2 -class Test__get_cluster(unittest2.TestCase): +class Test__get_instance(unittest2.TestCase): def _callFUT(self, timeout=None): - from gcloud.bigtable.happybase.connection import _get_cluster - return _get_cluster(timeout=timeout) + from gcloud.bigtable.happybase.connection import _get_instance + return _get_instance(timeout=timeout) - def _helper(self, timeout=None, clusters=(), failed_zones=()): + def _helper(self, timeout=None, instances=(), failed_locations=()): from functools import partial from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - client_with_clusters = partial(_Client, clusters=clusters, - failed_zones=failed_zones) - with _Monkey(MUT, Client=client_with_clusters): + client_with_instances = partial(_Client, instances=instances, + failed_locations=failed_locations) + with _Monkey(MUT, Client=client_with_instances): result = self._callFUT(timeout=timeout) # If we've reached this point, then _callFUT didn't fail, so we know - # there is exactly one cluster. - cluster, = clusters - self.assertEqual(result, cluster) - client = cluster.client + # there is exactly one instance. 
+ instance, = instances + self.assertEqual(result, instance) + client = instance.client self.assertEqual(client.args, ()) expected_kwargs = {'admin': True} if timeout is not None: @@ -48,28 +48,28 @@ def _helper(self, timeout=None, clusters=(), failed_zones=()): self.assertEqual(client.stop_calls, 1) def test_default(self): - cluster = _Cluster() - self._helper(clusters=[cluster]) + instance = _Instance() + self._helper(instances=[instance]) def test_with_timeout(self): - cluster = _Cluster() - self._helper(timeout=2103, clusters=[cluster]) + instance = _Instance() + self._helper(timeout=2103, instances=[instance]) - def test_with_no_clusters(self): + def test_with_no_instances(self): with self.assertRaises(ValueError): self._helper() - def test_with_too_many_clusters(self): - clusters = [_Cluster(), _Cluster()] + def test_with_too_many_instances(self): + instances = [_Instance(), _Instance()] with self.assertRaises(ValueError): - self._helper(clusters=clusters) + self._helper(instances=instances) - def test_with_failed_zones(self): - cluster = _Cluster() - failed_zone = 'us-central1-c' + def test_with_failed_locations(self): + instance = _Instance() + failed_location = 'us-central1-c' with self.assertRaises(ValueError): - self._helper(clusters=[cluster], - failed_zones=[failed_zone]) + self._helper(instances=[instance], + failed_locations=[failed_location]) class TestConnection(unittest2.TestCase): @@ -82,65 +82,65 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_constructor_defaults(self): - cluster = _Cluster() # Avoid implicit environ check. - self.assertEqual(cluster._client.start_calls, 0) - connection = self._makeOne(cluster=cluster) - self.assertEqual(cluster._client.start_calls, 1) - self.assertEqual(cluster._client.stop_calls, 0) + instance = _Instance() # Avoid implicit environ check. + self.assertEqual(instance._client.start_calls, 0) + connection = self._makeOne(instance=instance) + self.assertEqual(instance._client.start_calls, 1) + self.assertEqual(instance._client.stop_calls, 0) - self.assertEqual(connection._cluster, cluster) + self.assertEqual(connection._instance, instance) self.assertEqual(connection.table_prefix, None) self.assertEqual(connection.table_prefix_separator, '_') def test_constructor_no_autoconnect(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.start_calls, 0) - self.assertEqual(cluster._client.stop_calls, 0) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.start_calls, 0) + self.assertEqual(instance._client.stop_calls, 0) self.assertEqual(connection.table_prefix, None) self.assertEqual(connection.table_prefix_separator, '_') - def test_constructor_missing_cluster(self): + def test_constructor_missing_instance(self): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() + instance = _Instance() timeout = object() - get_cluster_called = [] + get_instance_called = [] - def mock_get_cluster(timeout): - get_cluster_called.append(timeout) - return cluster + def mock_get_instance(timeout): + get_instance_called.append(timeout) + return instance - with _Monkey(MUT, _get_cluster=mock_get_cluster): - connection = self._makeOne(autoconnect=False, cluster=None, + with _Monkey(MUT, _get_instance=mock_get_instance): + connection = self._makeOne(autoconnect=False, instance=None, timeout=timeout) self.assertEqual(connection.table_prefix, None) self.assertEqual(connection.table_prefix_separator, '_') - self.assertEqual(connection._cluster, cluster) + self.assertEqual(connection._instance, instance) - self.assertEqual(get_cluster_called, [timeout]) + self.assertEqual(get_instance_called, [timeout]) def test_constructor_explicit(self): autoconnect = False table_prefix = 'table-prefix' table_prefix_separator = 'sep' - cluster_copy = _Cluster() - cluster = _Cluster(copies=[cluster_copy]) + instance_copy = _Instance() + instance = _Instance(copies=[instance_copy]) connection = self._makeOne( autoconnect=autoconnect, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) self.assertEqual(connection.table_prefix, table_prefix) self.assertEqual(connection.table_prefix_separator, table_prefix_separator) def test_constructor_with_unknown_argument(self): - cluster = _Cluster() + instance = _Instance() with self.assertRaises(TypeError): - self._makeOne(cluster=cluster, unknown='foo') + self._makeOne(instance=instance, unknown='foo') def test_constructor_with_legacy_args(self): from gcloud._testing import _Monkey @@ -151,9 +151,9 @@ def test_constructor_with_legacy_args(self): def mock_warn(msg): warned.append(msg) - cluster = _Cluster() + instance = _Instance() with _Monkey(MUT, _WARN=mock_warn): - self._makeOne(cluster=cluster, host=object(), + self._makeOne(instance=instance, host=object(), port=object(), compat=object(), transport=object(), protocol=object()) @@ -164,10 +164,10 @@ def mock_warn(msg): self.assertIn('transport', warned[0]) self.assertIn('protocol', warned[0]) - def test_constructor_with_timeout_and_cluster(self): - cluster = _Cluster() + def test_constructor_with_timeout_and_instance(self): + instance = _Instance() with self.assertRaises(ValueError): - self._makeOne(cluster=cluster, timeout=object()) + self._makeOne(instance=instance, timeout=object()) def test_constructor_non_string_prefix(self): table_prefix = object() @@ -184,46 +184,46 @@ def test_constructor_non_string_prefix_separator(self): table_prefix_separator=table_prefix_separator) def test_open(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.start_calls, 0) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.start_calls, 0) connection.open() - self.assertEqual(cluster._client.start_calls, 1) - self.assertEqual(cluster._client.stop_calls, 0) + self.assertEqual(instance._client.start_calls, 1) + self.assertEqual(instance._client.stop_calls, 0) def test_close(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.stop_calls, 0) connection.close() - self.assertEqual(cluster._client.stop_calls, 1) - self.assertEqual(cluster._client.start_calls, 0) + self.assertEqual(instance._client.stop_calls, 1) + self.assertEqual(instance._client.start_calls, 0) - def test___del__with_cluster(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) + def test___del__with_instance(self): + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.stop_calls, 0) connection.__del__() - self.assertEqual(cluster._client.stop_calls, 1) + self.assertEqual(instance._client.stop_calls, 1) - def test___del__no_cluster(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) - del connection._cluster + def test___del__no_instance(self): + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.stop_calls, 0) + del connection._instance connection.__del__() - self.assertEqual(cluster._client.stop_calls, 0) + self.assertEqual(instance._client.stop_calls, 0) def test__table_name_with_prefix_set(self): table_prefix = 'table-prefix' table_prefix_separator = '<>' - cluster = _Cluster() + instance = _Instance() connection = self._makeOne( autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) name = 'some-name' prefixed = connection._table_name(name) @@ -231,9 +231,9 @@ def test__table_name_with_prefix_set(self): table_prefix + table_prefix_separator + name) def test__table_name_with_no_prefix_set(self): - cluster = _Cluster() + instance = _Instance() connection = self._makeOne(autoconnect=False, - cluster=cluster) + instance=instance) name = 'some-name' prefixed = connection._table_name(name) @@ -242,8 +242,8 @@ def test__table_name_with_no_prefix_set(self): def test_table_factory(self): from gcloud.bigtable.happybase.table import Table - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' table = connection.table(name) @@ -255,13 +255,13 @@ def test_table_factory(self): def _table_factory_prefix_helper(self, use_prefix=True): from gcloud.bigtable.happybase.table import Table - cluster = _Cluster() # Avoid implicit environ check. + instance = _Instance() # Avoid implicit environ check. 
table_prefix = 'table-prefix' table_prefix_separator = '<>' connection = self._makeOne( autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) name = 'table-name' table = connection.table(name, use_prefix=use_prefix) @@ -285,11 +285,11 @@ def test_tables(self): table_name1 = 'table-name1' table_name2 = 'table-name2' - cluster = _Cluster(list_tables_result=[ + instance = _Instance(list_tables_result=[ Table(table_name1, None), Table(table_name2, None), ]) - connection = self._makeOne(autoconnect=False, cluster=cluster) + connection = self._makeOne(autoconnect=False, instance=instance) result = connection.tables() self.assertEqual(result, [table_name1, table_name2]) @@ -303,12 +303,12 @@ def test_tables_with_prefix(self): table_name1 = (table_prefix + table_prefix_separator + unprefixed_table_name1) table_name2 = 'table-name2' - cluster = _Cluster(list_tables_result=[ + instance = _Instance(list_tables_result=[ Table(table_name1, None), Table(table_name2, None), ]) connection = self._makeOne( - autoconnect=False, cluster=cluster, table_prefix=table_prefix, + autoconnect=False, instance=instance, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator) result = connection.tables() self.assertEqual(result, [unprefixed_table_name1]) @@ -318,8 +318,8 @@ def test_create_table(self): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) mock_gc_rule = object() called_options = [] @@ -354,7 +354,7 @@ def make_table(*args, **kwargs): # Just one table would have been created. table_instance, = tables_created - self.assertEqual(table_instance.args, (name, cluster)) + self.assertEqual(table_instance.args, (name, instance)) self.assertEqual(table_instance.kwargs, {}) self.assertEqual(table_instance.create_calls, 1) @@ -380,8 +380,8 @@ def make_table(*args, **kwargs): self.assertEqual(col_fam_created[2].create_calls, 1) def test_create_table_bad_type(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' families = None @@ -389,8 +389,8 @@ def test_create_table_bad_type(self): connection.create_table(name, families) def test_create_table_bad_value(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' families = {} @@ -401,8 +401,8 @@ def _create_table_error_helper(self, err_val, err_type): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) tables_created = [] @@ -450,8 +450,8 @@ def _delete_table_helper(self, disable=False): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) tables_created = [] @@ -466,7 +466,7 @@ def make_table(*args, **kwargs): # Just one table would have been created. table_instance, = tables_created - self.assertEqual(table_instance.args, (name, cluster)) + self.assertEqual(table_instance.args, (name, instance)) self.assertEqual(table_instance.kwargs, {}) self.assertEqual(table_instance.delete_calls, 1) @@ -488,32 +488,32 @@ def mock_warn(msg): self.assertEqual(warned, [MUT._DISABLE_DELETE_MSG]) def test_enable_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' with self.assertRaises(NotImplementedError): connection.enable_table(name) def test_disable_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' with self.assertRaises(NotImplementedError): connection.disable_table(name) def test_is_table_enabled(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' with self.assertRaises(NotImplementedError): connection.is_table_enabled(name) def test_compact_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' major = True @@ -608,10 +608,10 @@ def test_non_dictionary(self): class _Client(object): def __init__(self, *args, **kwargs): - self.clusters = kwargs.pop('clusters', []) - for cluster in self.clusters: - cluster.client = self - self.failed_zones = kwargs.pop('failed_zones', []) + self.instances = kwargs.pop('instances', []) + for instance in self.instances: + instance.client = self + self.failed_locations = kwargs.pop('failed_locations', []) self.args = args self.kwargs = kwargs self.start_calls = 0 @@ -623,11 +623,11 @@ def start(self): def stop(self): self.stop_calls += 1 - def list_clusters(self): - return self.clusters, self.failed_zones + def list_instances(self): + return self.instances, self.failed_locations -class _Cluster(object): +class _Instance(object): def __init__(self, copies=(), list_tables_result=()): self.copies = list(copies) diff --git a/gcloud/bigtable/happybase/test_pool.py b/gcloud/bigtable/happybase/test_pool.py index c3634681e45d..d4485765e8c5 100644 --- a/gcloud/bigtable/happybase/test_pool.py +++ b/gcloud/bigtable/happybase/test_pool.py @@ -31,10 +31,10 @@ def test_constructor_defaults(self): from gcloud.bigtable.happybase.connection import Connection size = 11 - cluster_copy = _Cluster() - all_copies = [cluster_copy] * size - cluster = _Cluster(copies=all_copies) # Avoid implicit environ check. - pool = self._makeOne(size, cluster=cluster) + instance_copy = _Instance() + all_copies = [instance_copy] * size + instance = _Instance(copies=all_copies) # Avoid implicit environ check. + pool = self._makeOne(size, instance=instance) self.assertTrue(isinstance(pool._lock, type(threading.Lock()))) self.assertTrue(isinstance(pool._thread_connections, threading.local)) @@ -46,17 +46,17 @@ def test_constructor_defaults(self): self.assertEqual(queue.maxsize, size) for connection in queue.queue: self.assertTrue(isinstance(connection, Connection)) - self.assertTrue(connection._cluster is cluster_copy) + self.assertTrue(connection._instance is instance_copy) def test_constructor_passes_kwargs(self): table_prefix = 'foo' table_prefix_separator = '<>' - cluster = _Cluster() # Avoid implicit environ check. + instance = _Instance() # Avoid implicit environ check. size = 1 pool = self._makeOne(size, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) for connection in pool._queue.queue: self.assertEqual(connection.table_prefix, table_prefix) @@ -76,53 +76,53 @@ def open(self): self._open_called = True # First make sure the custom Connection class does as expected. 
- cluster_copy1 = _Cluster() - cluster_copy2 = _Cluster() - cluster_copy3 = _Cluster() - cluster = _Cluster( - copies=[cluster_copy1, cluster_copy2, cluster_copy3]) - connection = ConnectionWithOpen(autoconnect=False, cluster=cluster) + instance_copy1 = _Instance() + instance_copy2 = _Instance() + instance_copy3 = _Instance() + instance = _Instance( + copies=[instance_copy1, instance_copy2, instance_copy3]) + connection = ConnectionWithOpen(autoconnect=False, instance=instance) self.assertFalse(connection._open_called) - self.assertTrue(connection._cluster is cluster_copy1) - connection = ConnectionWithOpen(autoconnect=True, cluster=cluster) + self.assertTrue(connection._instance is instance_copy1) + connection = ConnectionWithOpen(autoconnect=True, instance=instance) self.assertTrue(connection._open_called) - self.assertTrue(connection._cluster is cluster_copy2) + self.assertTrue(connection._instance is instance_copy2) # Then make sure autoconnect=True is ignored in a pool. size = 1 with _Monkey(MUT, Connection=ConnectionWithOpen): - pool = self._makeOne(size, autoconnect=True, cluster=cluster) + pool = self._makeOne(size, autoconnect=True, instance=instance) for connection in pool._queue.queue: self.assertTrue(isinstance(connection, ConnectionWithOpen)) - self.assertTrue(connection._cluster is cluster_copy3) + self.assertTrue(connection._instance is instance_copy3) self.assertFalse(connection._open_called) - def test_constructor_infers_cluster(self): + def test_constructor_infers_instance(self): from gcloud._testing import _Monkey from gcloud.bigtable.happybase.connection import Connection from gcloud.bigtable.happybase import pool as MUT size = 1 - cluster_copy = _Cluster() - all_copies = [cluster_copy] * size - cluster = _Cluster(copies=all_copies) - get_cluster_calls = [] + instance_copy = _Instance() + all_copies = [instance_copy] * size + instance = _Instance(copies=all_copies) + get_instance_calls = [] - def mock_get_cluster(timeout=None): - get_cluster_calls.append(timeout) - return cluster + def mock_get_instance(timeout=None): + get_instance_calls.append(timeout) + return instance - with _Monkey(MUT, _get_cluster=mock_get_cluster): + with _Monkey(MUT, _get_instance=mock_get_instance): pool = self._makeOne(size) for connection in pool._queue.queue: self.assertTrue(isinstance(connection, Connection)) # We know that the Connection() constructor will - # call cluster.copy(). - self.assertTrue(connection._cluster is cluster_copy) + # call instance.copy(). + self.assertTrue(connection._instance is instance_copy) - self.assertEqual(get_cluster_calls, [None]) + self.assertEqual(get_instance_calls, [None]) def test_constructor_non_integer_size(self): size = None @@ -142,11 +142,11 @@ def _makeOneWithMockQueue(self, queue_return): from gcloud.bigtable.happybase import pool as MUT # We are going to use a fake queue, so we don't want any connections - # or clusters to be created in the constructor. + # or instances to be created in the constructor. 
size = -1 - cluster = object() + instance = object() with _Monkey(MUT, _MIN_POOL_SIZE=size): - pool = self._makeOne(size, cluster=cluster) + pool = self._makeOne(size, instance=instance) pool._queue = _Queue(queue_return) return pool @@ -230,7 +230,7 @@ def open(self): pass -class _Cluster(object): +class _Instance(object): def __init__(self, copies=()): self.copies = list(copies) diff --git a/gcloud/bigtable/happybase/test_table.py b/gcloud/bigtable/happybase/test_table.py index be18ec1bc014..7efa1864d91d 100644 --- a/gcloud/bigtable/happybase/test_table.py +++ b/gcloud/bigtable/happybase/test_table.py @@ -51,8 +51,8 @@ def test_constructor(self): from gcloud.bigtable.happybase import table as MUT name = 'table-name' - cluster = object() - connection = _Connection(cluster) + instance = object() + connection = _Connection(instance) tables_constructed = [] def make_low_level_table(*args, **kwargs): @@ -67,7 +67,7 @@ def make_low_level_table(*args, **kwargs): table_instance, = tables_constructed self.assertEqual(table._low_level_table, table_instance) - self.assertEqual(table_instance.args, (name, cluster)) + self.assertEqual(table_instance.args, (name, instance)) self.assertEqual(table_instance.kwargs, {}) def test_constructor_null_connection(self): @@ -1405,8 +1405,8 @@ def test_many_rows(self): class _Connection(object): - def __init__(self, cluster): - self._cluster = cluster + def __init__(self, instance): + self._instance = instance class _MockLowLevelColumnFamily(object): diff --git a/system_tests/bigtable_happybase.py b/system_tests/bigtable_happybase.py index 231f1189d61c..26a3e870a160 100644 --- a/system_tests/bigtable_happybase.py +++ b/system_tests/bigtable_happybase.py @@ -30,9 +30,8 @@ _PACK_I64 = struct.Struct('>q').pack _FIRST_ELT = operator.itemgetter(0) _helpers.PROJECT = TESTS_PROJECT -ZONE = 'us-central1-c' -CLUSTER_ID = 'gcloud' + unique_resource_id('-') -CLUSTER_ID = CLUSTER_ID[:30] # Cluster IDs can't exceed 30 chars. +LOCATION_ID = 'us-central1-c' +INSTANCE_ID = 'gcloud' + unique_resource_id('-') TABLE_NAME = 'table-name' ALT_TABLE_NAME = 'other-table' TTL_FOR_TEST = 3 @@ -65,12 +64,12 @@ class Config(object): def set_connection(): client = client_mod.Client(admin=True) - cluster = client.cluster(ZONE, CLUSTER_ID) + instance = client.instance(INSTANCE_ID, LOCATION_ID) client.start() - operation = cluster.create() + operation = instance.create() if not _operation_wait(operation): - raise RuntimeError('Cluster creation exceed 5 seconds.') - Config.CONNECTION = Connection(cluster=cluster) + raise RuntimeError('Instance creation exceed 5 seconds.') + Config.CONNECTION = Connection(instance=instance) def setUpModule(): @@ -81,7 +80,7 @@ def setUpModule(): def tearDownModule(): Config.CONNECTION.delete_table(TABLE_NAME) - Config.CONNECTION._cluster.delete() + Config.CONNECTION._instance.delete() Config.CONNECTION.close() From 40b20166ef81017a277223616d8efa44074d47ab Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 17:48:25 -0400 Subject: [PATCH 061/103] Rename testcases for clarity. 
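The new names spell out which read_row outcome each test exercises: a
"miss" is a lookup of a row that does not exist, while "complete" is a
fully committed row. A rough usage sketch of the distinction (the row
keys are made up; assumes `table` is a gcloud.bigtable.table.Table):

    # Hypothetical keys, for illustration only.
    row = table.read_row(b'no-such-key')
    assert row is None          # miss: nothing stored under that key

    row = table.read_row(b'existing-key')
    assert row is not None      # complete: a committed PartialRowData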
--- gcloud/bigtable/test_table.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index df385214d77a..ce329e5cb9f1 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -281,11 +281,11 @@ def mock_create_row_request(table_name, row_key, filter_): self.assertEqual(mock_created, [(table.name, self.ROW_KEY, filter_obj)]) - def test_read_empty_row(self): + def test_read_row_miss(self): chunks = [] self._read_row_helper(chunks, None) - def test_read_row(self): + def test_read_row_complete(self): from gcloud.bigtable.row_data import Cell from gcloud.bigtable.row_data import PartialRowData From 9dfa82361b2af89267aba488982997e59b1baddb Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 18:04:46 -0400 Subject: [PATCH 062/103] 'Table.read_rows': no responses -> None. This is the common case for a miss, rather than a response with no chunks. --- gcloud/bigtable/table.py | 2 +- gcloud/bigtable/test_table.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index c0a3b6e335c0..3eef6fe2a5ad 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -234,7 +234,7 @@ def read_row(self, row_key, filter_=None): client.timeout_seconds) rows_data = PartialRowsData(response_iterator) rows_data.consume_all() - if rows_data.state != rows_data.NEW_ROW: + if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): raise ValueError('The row remains partial / is not committed.') if len(rows_data.rows) == 0: diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index ce329e5cb9f1..2ac3eb1b001e 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -261,8 +261,11 @@ def mock_create_row_request(table_name, row_key, filter_): return request_pb # Create response_iterator - response_pb = _ReadRowsResponsePB(chunks=chunks) - response_iterator = iter([response_pb]) + if chunks is None: + response_iterator = iter(()) # no responses at all + else: + response_pb = _ReadRowsResponsePB(chunks=chunks) + response_iterator = iter([response_pb]) # Patch the stub used by the API method. 
client._data_stub = stub = _FakeStub(response_iterator) @@ -281,7 +284,10 @@ def mock_create_row_request(table_name, row_key, filter_): self.assertEqual(mock_created, [(table.name, self.ROW_KEY, filter_obj)]) - def test_read_row_miss(self): + def test_read_row_miss_no__responses(self): + self._read_row_helper(None, None) + + def test_read_row_miss_no_chunks_in_response(self): chunks = [] self._read_row_helper(chunks, None) From e92da5c88a6702c104c7112fd22f9b75f2cf86ad Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 18:18:38 -0400 Subject: [PATCH 063/103] V2 wraps row for 'ReadModifyWriteRow' request in new response message --- gcloud/bigtable/row.py | 2 +- gcloud/bigtable/test_row.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py index 845747d41923..aad7dbecad0e 100644 --- a/gcloud/bigtable/row.py +++ b/gcloud/bigtable/row.py @@ -845,7 +845,7 @@ def _parse_rmw_row_response(row_response): } """ result = {} - for column_family in row_response.families: + for column_family in row_response.row.families: column_family_id, curr_family = _parse_family_pb(column_family) result[column_family_id] = curr_family return result diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py index b5f486cbec0c..ff18945acafb 100644 --- a/gcloud/bigtable/test_row.py +++ b/gcloud/bigtable/test_row.py @@ -698,7 +698,7 @@ def test_it(self): ], }, } - sample_input = _RowPB( + response_row = _RowPB( families=[ _FamilyPB( name=col_fam1, @@ -743,6 +743,7 @@ def test_it(self): ), ], ) + sample_input = _ReadModifyWriteRowResponsePB(row=response_row) self.assertEqual(expected_output, self._callFUT(sample_input)) @@ -827,6 +828,12 @@ def _ReadModifyWriteRowRequestPB(*args, **kw): return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) +def _ReadModifyWriteRowResponsePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) + + def _CellPB(*args, **kw): from gcloud.bigtable._generated_v2 import ( data_pb2 as data_v2_pb2) From 0e80bdd58c7b1366dfbd80c0be96d4cd1c20239e Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 18:30:33 -0400 Subject: [PATCH 064/103] happybase.Table.scan: proocess all rows from a single request. --- gcloud/bigtable/happybase/table.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/gcloud/bigtable/happybase/table.py b/gcloud/bigtable/happybase/table.py index 8aea3ca42241..df3d2b075d75 100644 --- a/gcloud/bigtable/happybase/table.py +++ b/gcloud/bigtable/happybase/table.py @@ -424,11 +424,12 @@ def scan(self, row_start=None, row_stop=None, row_prefix=None, while True: try: partial_rows_data.consume_next() - row_key, curr_row_data = rows_dict.popitem() - # NOTE: We expect len(rows_dict) == 0, but don't check it. - curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp) - yield (row_key, curr_row_dict) + for row_key in sorted(rows_dict): + curr_row_data = rows_dict.pop(row_key) + # NOTE: We expect len(rows_dict) == 0, but don't check it. + curr_row_dict = _partial_row_to_dict( + curr_row_data, include_timestamp=include_timestamp) + yield (row_key, curr_row_dict) except StopIteration: break From d18a31634f592e96585221c736763d14124f045f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 28 Jun 2016 21:40:50 -0400 Subject: [PATCH 065/103] Valet service. 
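No behavior change is intended here: Table.scan keeps its public
signature, but its legacy-argument handling and filter construction move
out into a module-level _scan_filter_helper so that scan() itself stays
short. A rough sketch of the extracted helper's contract (the argument
values are made up; the helper is private and shown only for
illustration):

    from gcloud.bigtable.happybase.table import _scan_filter_helper

    row_start, row_stop, filter_ = _scan_filter_helper(
        None, None, 'event-', ['cf1:col'], None, 10, {})
    # row_start == 'event-'; row_stop is the prefix's string successor
    # ('event.'); filter_ combines the column filter with the
    # latest-version-only limit, just as scan() built it before.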
--- gcloud/bigtable/happybase/table.py | 81 +++++++++++--------- gcloud/bigtable/happybase/test_connection.py | 4 +- gcloud/bigtable/happybase/test_pool.py | 7 +- gcloud/bigtable/test_table.py | 2 +- 4 files changed, 51 insertions(+), 43 deletions(-) diff --git a/gcloud/bigtable/happybase/table.py b/gcloud/bigtable/happybase/table.py index df3d2b075d75..e35bb8090494 100644 --- a/gcloud/bigtable/happybase/table.py +++ b/gcloud/bigtable/happybase/table.py @@ -378,42 +378,8 @@ def scan(self, row_start=None, row_stop=None, row_prefix=None, :class:`TypeError ` if a string ``filter`` is used. """ - filter_ = kwargs.pop('filter', None) - legacy_args = [] - for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): - if kw_name in kwargs: - legacy_args.append(kw_name) - kwargs.pop(kw_name) - if legacy_args: - legacy_args = ', '.join(legacy_args) - message = ('The HappyBase legacy arguments %s were used. These ' - 'arguments are unused by gcloud.' % (legacy_args,)) - _WARN(message) - if kwargs: - raise TypeError('Received unexpected arguments', kwargs.keys()) - - if limit is not None and limit < 1: - raise ValueError('limit must be positive') - if row_prefix is not None: - if row_start is not None or row_stop is not None: - raise ValueError('row_prefix cannot be combined with ' - 'row_start or row_stop') - row_start = row_prefix - row_stop = _string_successor(row_prefix) - - filters = [] - if isinstance(filter_, six.string_types): - raise TypeError('Specifying filters as a string is not supported ' - 'by Cloud Bigtable. Use a ' - 'gcloud.bigtable.row.RowFilter instead.') - elif filter_ is not None: - filters.append(filter_) - - if columns is not None: - filters.append(_columns_filter_helper(columns)) - # versions == 1 since we only want the latest. - filter_chain = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) + row_start, row_stop, filter_chain = _scan_filter_helper( + row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs) partial_rows_data = self._low_level_table.read_rows( start_key=row_start, end_key=row_stop, @@ -912,6 +878,49 @@ def _filter_chain_helper(column=None, versions=None, timestamp=None, return RowFilterChain(filters=filters) +def _scan_filter_helper(row_start, row_stop, row_prefix, columns, + timestamp, limit, kwargs): + """Helper for :meth:`scan`: build up a filter chain.""" + filter_ = kwargs.pop('filter', None) + legacy_args = [] + for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): + if kw_name in kwargs: + legacy_args.append(kw_name) + kwargs.pop(kw_name) + if legacy_args: + legacy_args = ', '.join(legacy_args) + message = ('The HappyBase legacy arguments %s were used. These ' + 'arguments are unused by gcloud.' % (legacy_args,)) + _WARN(message) + if kwargs: + raise TypeError('Received unexpected arguments', kwargs.keys()) + + if limit is not None and limit < 1: + raise ValueError('limit must be positive') + if row_prefix is not None: + if row_start is not None or row_stop is not None: + raise ValueError('row_prefix cannot be combined with ' + 'row_start or row_stop') + row_start = row_prefix + row_stop = _string_successor(row_prefix) + + filters = [] + if isinstance(filter_, six.string_types): + raise TypeError('Specifying filters as a string is not supported ' + 'by Cloud Bigtable. Use a ' + 'gcloud.bigtable.row.RowFilter instead.') + elif filter_ is not None: + filters.append(filter_) + + if columns is not None: + filters.append(_columns_filter_helper(columns)) + + # versions == 1 since we only want the latest. 
+ filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) + return row_start, row_stop, filter_ + + def _columns_filter_helper(columns): """Creates a union filter for a list of columns. diff --git a/gcloud/bigtable/happybase/test_connection.py b/gcloud/bigtable/happybase/test_connection.py index 39dea64fbf5f..6236539db71f 100644 --- a/gcloud/bigtable/happybase/test_connection.py +++ b/gcloud/bigtable/happybase/test_connection.py @@ -29,8 +29,8 @@ def _helper(self, timeout=None, instances=(), failed_locations=()): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - client_with_instances = partial(_Client, instances=instances, - failed_locations=failed_locations) + client_with_instances = partial( + _Client, instances=instances, failed_locations=failed_locations) with _Monkey(MUT, Client=client_with_instances): result = self._callFUT(timeout=timeout) diff --git a/gcloud/bigtable/happybase/test_pool.py b/gcloud/bigtable/happybase/test_pool.py index d4485765e8c5..50212927c0b5 100644 --- a/gcloud/bigtable/happybase/test_pool.py +++ b/gcloud/bigtable/happybase/test_pool.py @@ -33,7 +33,7 @@ def test_constructor_defaults(self): size = 11 instance_copy = _Instance() all_copies = [instance_copy] * size - instance = _Instance(copies=all_copies) # Avoid implicit environ check. + instance = _Instance(all_copies) # Avoid implicit environ check. pool = self._makeOne(size, instance=instance) self.assertTrue(isinstance(pool._lock, type(threading.Lock()))) @@ -79,8 +79,7 @@ def open(self): instance_copy1 = _Instance() instance_copy2 = _Instance() instance_copy3 = _Instance() - instance = _Instance( - copies=[instance_copy1, instance_copy2, instance_copy3]) + instance = _Instance([instance_copy1, instance_copy2, instance_copy3]) connection = ConnectionWithOpen(autoconnect=False, instance=instance) self.assertFalse(connection._open_called) self.assertTrue(connection._instance is instance_copy1) @@ -106,7 +105,7 @@ def test_constructor_infers_instance(self): size = 1 instance_copy = _Instance() all_copies = [instance_copy] * size - instance = _Instance(copies=all_copies) + instance = _Instance(all_copies) get_instance_calls = [] def mock_get_instance(timeout=None): diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 2ac3eb1b001e..1494b3917d91 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -262,7 +262,7 @@ def mock_create_row_request(table_name, row_key, filter_): # Create response_iterator if chunks is None: - response_iterator = iter(()) # no responses at all + response_iterator = iter(()) # no responses at all else: response_pb = _ReadRowsResponsePB(chunks=chunks) response_iterator = iter([response_pb]) From e0f59faf44d84e326f2df294ecce8cdd012fd3a1 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 29 Jun 2016 14:27:19 -0400 Subject: [PATCH 066/103] Drop declaration of univeral wheel support. --- setup.cfg | 3 --- 1 file changed, 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 1a5473d09c3d..4a38504c6476 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,2 @@ [nosetests] exclude-dir = system_tests - -[bdist_wheel] -universal = 1 From 3c6e03ce5527ccb48278763bba0e9f5fbd014884 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 29 Jun 2016 14:29:13 -0400 Subject: [PATCH 067/103] Don't release wheels from Travis. Pending resolution of #1879 et al. 
--- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 675a063c64ea..e27ac90c55a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,7 +27,8 @@ deploy: repo: GoogleCloudPlatform/gcloud-python # until this is fixed: https://github.com/travis-ci/travis-ci/issues/1675 all_branches: true - distributions: "sdist bdist_wheel" + # 'bdist_wheel' builds disabled until #1879 et al. are resolved. + distributions: "sdist" cache: directories: From 05230c3443674429657300ad575ec27f13941cf9 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 29 Jun 2016 15:56:05 -0400 Subject: [PATCH 068/103] Prepare 0.17.0 release. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 045fcc4bfc32..646c86abf3be 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ setup( name='gcloud', - version='0.16.0', + version='0.17.0', description='API Client library for Google Cloud', author='Google Cloud Platform', author_email='jjg+gcloud-python@google.com', From 7a6b1c97f10080e376c7f0e7f8484c33286b3481 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 29 Jun 2016 16:07:44 -0400 Subject: [PATCH 069/103] Fix typos in docs. --- docs/logging-usage.rst | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/logging-usage.rst b/docs/logging-usage.rst index ec8f157cec5c..d0acc528f0f2 100644 --- a/docs/logging-usage.rst +++ b/docs/logging-usage.rst @@ -211,13 +211,13 @@ Export log entries using sinks Sinks allow exporting entries which match a given filter to Cloud Storage buckets, BigQuery datasets, or Cloud Pub/Sub topics. -Export to Cloud storage +Export to Cloud Storage ~~~~~~~~~~~~~~~~~~~~~~~ Make sure that the storage bucket you want to export logs too has -`cloud-logs@google.com` as the owner. See `Set permission for writing exported logs`_. +``cloud-logs@google.com`` as the owner. See `Set permission for writing exported logs`_. -Add `cloud-logs@google.com` as the owner of `my-bucket-name`: +Add ``cloud-logs@google.com`` as the owner of ``my-bucket-name``: .. doctest:: @@ -236,11 +236,12 @@ Export to BigQuery ~~~~~~~~~~~~~~~~~~ To export logs to BigQuery you must log into the Cloud Platform Console -and add `cloud-logs@google.com` to a dataset. +and add ``cloud-logs@google.com`` to a dataset. See: `Setting permissions for BigQuery`_ .. doctest:: + >>> from gcloud import bigquery >>> from gcloud.bigquery.dataset import AccessGrant >>> bigquery_client = bigquery.Client() @@ -259,11 +260,12 @@ Export to Pub/Sub ~~~~~~~~~~~~~~~~~ To export logs to BigQuery you must log into the Cloud Platform Console -and add `cloud-logs@google.com` to a topic. +and add ``cloud-logs@google.com`` to a topic. See: `Setting permissions for Pub/Sub`_ .. doctest:: + >>> from gcloud import pubsub >>> client = pubsub.Client() >>> topic = client.topic('your-topic-name') From bf5e637f7d1203717def17cdf02e9d7bfa6a1e78 Mon Sep 17 00:00:00 2001 From: Misha Brukman Date: Wed, 29 Jun 2016 19:37:52 -0400 Subject: [PATCH 070/103] Fixed spelling and formatting. --- docs/bigtable-instance-api.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst index c2fd1402a97b..7cf3f2e9c860 100644 --- a/docs/bigtable-instance-api.rst +++ b/docs/bigtable-instance-api.rst @@ -10,16 +10,16 @@ Instance Admin API After creating a :class:`Client `, you can interact with individual instances for a project. 
-List Intances -------------- +List Instances +-------------- -If you want a comprehensive list of all existing intances, make a +If you want a comprehensive list of all existing instances, make a `ListInstances`_ API request with -:meth:`Client.list_intances() `: +:meth:`Client.list_instances() `: .. code:: python - intances = client.list_intances() + instances = client.list_instances() Instance Factory ---------------- @@ -59,7 +59,7 @@ Check on Current Operation API will return a `long-running operation`_ and a corresponding :class:`Operation ` object will be returned by - :meth:`create() ``. + :meth:`create() `. You can check if a long-running operation (for a :meth:`create() ` has finished From 3e9928c25f94169b855ff11fdcd9d467437f69c8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 29 Jun 2016 15:28:19 -0400 Subject: [PATCH 071/103] Document required-for-new-instances 'location_id' parameter. --- docs/bigtable-instance-api.rst | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst index 7cf3f2e9c860..2dd7cbfd1174 100644 --- a/docs/bigtable-instance-api.rst +++ b/docs/bigtable-instance-api.rst @@ -28,10 +28,15 @@ To create a :class:`Instance ` object: .. code:: python - instance = client.instance(instance_id, display_name=display_name) + instance = client.instance(instance_id, location_id, + display_name=display_name) -``display_name`` is optional. When not provided, -``display_name`` defaults to the ``instance_id`` value. +- ``location_id`` is the ID of the location in which the instance's cluster + will be hosted, e.g. ``'us-central1-c'``. ``location_id`` is required for + instances which do not already exist. + +- ``display_name`` is optional. When not provided, ``display_name`` defaults + to the ``instance_id`` value. Even if this :class:`Instance ` already has been created with the API, you'll want this object to use as a From 5ad2861102c7c24b0c2ef8816db90258fae4f26a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 29 Jun 2016 15:31:48 -0400 Subject: [PATCH 072/103] Replace somewhat convoluted paragraph. Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1932#discussion-diff-69009158 --- docs/bigtable-instance-api.rst | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst index 2dd7cbfd1174..39bfd1fae5fa 100644 --- a/docs/bigtable-instance-api.rst +++ b/docs/bigtable-instance-api.rst @@ -38,11 +38,13 @@ To create a :class:`Instance ` object: - ``display_name`` is optional. When not provided, ``display_name`` defaults to the ``instance_id`` value. -Even if this :class:`Instance ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`Table ` just as the -:class:`Client ` is used as the parent of -a :class:`Instance `. +You can also use :meth:`Client.instance` to create a local wrapper for +instances already has been created with the API, or through the web conole: + +.. code:: python + + instance = client.instance(existing_instance_id) + instance.reload() Create a new Instance --------------------- From 0c521165543391ef598728240cabcbf400df6542 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 29 Jun 2016 17:18:04 -0400 Subject: [PATCH 073/103] Typo fix. 
Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1932#discussion-diff-69027025 --- gcloud/bigtable/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index cf25d05f2a0d..35ca65ce212a 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -18,7 +18,7 @@ In the hierarchy of API concepts -* a :class:`Client` owns a :class:`.Instance` +* a :class:`Client` owns an :class:`.Instance` * a :class:`.Instance` owns a :class:`Table ` * a :class:`Table ` owns a :class:`ColumnFamily <.column_family.ColumnFamily>` From 081603c34d6d994425003dec2c78092edc1b19e2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 30 Jun 2016 10:02:00 -0400 Subject: [PATCH 074/103] Rephrase again. Addresses: https://github.com/GoogleCloudPlatform/gcloud-python/pull/1948#discussion-diff-69135147 --- docs/bigtable-instance-api.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst index 39bfd1fae5fa..4d03fa7b1b97 100644 --- a/docs/bigtable-instance-api.rst +++ b/docs/bigtable-instance-api.rst @@ -39,7 +39,8 @@ To create a :class:`Instance ` object: to the ``instance_id`` value. You can also use :meth:`Client.instance` to create a local wrapper for -instances already has been created with the API, or through the web conole: +instances that have already been created with the API, or through the web +conole: .. code:: python From 3dd188ff13988fadcade07115751b759751098f3 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 30 Jun 2016 10:03:36 -0400 Subject: [PATCH 075/103] Overlooked 'cluster' -> 'instance' rename in docs. --- docs/bigtable-usage.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/bigtable-usage.rst b/docs/bigtable-usage.rst index 61966ac21795..14cadd084f72 100644 --- a/docs/bigtable-usage.rst +++ b/docs/bigtable-usage.rst @@ -18,8 +18,8 @@ Get started by learning about the In the hierarchy of API concepts * a :class:`Client ` owns a - :class:`Cluster ` -* a :class:`Cluster ` owns a + :class:`Cluster ` * a :class:`Table ` owns a :class:`ColumnFamily ` From ed0f77a0cf99affbe331b35fc45dde16b370a2a4 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 1 Jul 2016 11:20:48 -0600 Subject: [PATCH 076/103] Re-enable bundling. 
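With bundling turned back on, GAX's publish() does not hand back the
response directly; it returns a bundling event whose result becomes
available once the bundle is flushed, so the wrapper has to wait on the
event before reading the message IDs. Simplified sketch of the new
control flow (gax_api, topic_path and message_pbs are assumed to be in
scope, as they are inside topic_publish below):

    from google.gax import CallOptions

    event = gax_api.publish(topic_path, message_pbs,
                            options=CallOptions(is_bundling=True))
    if not event.is_set():      # the result may already have arrived
        event.wait()
    message_ids = event.result.message_ids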
Closes #1911 --- gcloud/_testing.py | 14 ++++++++++++++ gcloud/pubsub/_gax.py | 14 ++++++++++---- gcloud/pubsub/test__gax.py | 33 +++++++++++++++++++++++++++++---- 3 files changed, 53 insertions(+), 8 deletions(-) diff --git a/gcloud/_testing.py b/gcloud/_testing.py index 0a440e817436..15ef5dd298e1 100644 --- a/gcloud/_testing.py +++ b/gcloud/_testing.py @@ -59,3 +59,17 @@ def __init__(self, items, page_token): def next(self): items, self._items = self._items, None return items + + +class _GAXBundlingEvent(object): + + result = None + + def __init__(self, result): + self._result = result + + def is_set(self): + return self.result is not None + + def wait(self, *_): + self.result = self._result diff --git a/gcloud/pubsub/_gax.py b/gcloud/pubsub/_gax.py index 28ac6c23e294..ce105f69fc5a 100644 --- a/gcloud/pubsub/_gax.py +++ b/gcloud/pubsub/_gax.py @@ -162,17 +162,23 @@ def topic_publish(self, topic_path, messages): :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ - options = CallOptions(is_bundling=False) + options = CallOptions(is_bundling=True) message_pbs = [_message_pb_from_dict(message) for message in messages] try: - result = self._gax_api.publish(topic_path, message_pbs, - options=options) + # result = self._gax_api.publish(topic_path, message_pbs, + # options=options) + + event = self._gax_api.publish(topic_path, message_pbs, + options=options) + if not event.is_set(): + event.wait() except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise - return result.message_ids + # return result.message_ids + return event.result.message_ids def topic_list_subscriptions(self, topic_path, page_size=0, page_token=None): diff --git a/gcloud/pubsub/test__gax.py b/gcloud/pubsub/test__gax.py index 2426d2dfb7e8..98d4d4e7bbcd 100644 --- a/gcloud/pubsub/test__gax.py +++ b/gcloud/pubsub/test__gax.py @@ -204,12 +204,15 @@ def test_topic_delete_error(self): def test_topic_publish_hit(self): import base64 + from gcloud._testing import _GAXBundlingEvent PAYLOAD = b'This is the message text' B64 = base64.b64encode(PAYLOAD).decode('ascii') MSGID = 'DEADBEEF' MESSAGE = {'data': B64, 'attributes': {}} response = _PublishResponsePB([MSGID]) - gax_api = _GAXPublisherAPI(_publish_response=response) + event = _GAXBundlingEvent(response) + event.wait() # already received result + gax_api = _GAXPublisherAPI(_publish_response=event) api = self._makeOne(gax_api) resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE]) @@ -220,7 +223,29 @@ def test_topic_publish_hit(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options.is_bundling, False) + self.assertEqual(options.is_bundling, True) + + def test_topic_publish_hit_with_wait(self): + import base64 + from gcloud._testing import _GAXBundlingEvent + PAYLOAD = b'This is the message text' + B64 = base64.b64encode(PAYLOAD).decode('ascii') + MSGID = 'DEADBEEF' + MESSAGE = {'data': B64, 'attributes': {}} + response = _PublishResponsePB([MSGID]) + event = _GAXBundlingEvent(response) + gax_api = _GAXPublisherAPI(_publish_response=event) + api = self._makeOne(gax_api) + + resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE]) + + self.assertEqual(resource, [MSGID]) + topic_path, message_pbs, options = gax_api._publish_called_with + self.assertEqual(topic_path, self.TOPIC_PATH) + message_pb, = message_pbs + self.assertEqual(message_pb.data, B64) + self.assertEqual(message_pb.attributes, {}) + 
self.assertEqual(options.is_bundling, True) def test_topic_publish_miss_w_attrs_w_bytes_payload(self): import base64 @@ -239,7 +264,7 @@ def test_topic_publish_miss_w_attrs_w_bytes_payload(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {'foo': 'bar'}) - self.assertEqual(options.is_bundling, False) + self.assertEqual(options.is_bundling, True) def test_topic_publish_error(self): import base64 @@ -258,7 +283,7 @@ def test_topic_publish_error(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options.is_bundling, False) + self.assertEqual(options.is_bundling, True) def test_topic_list_subscriptions_no_paging(self): from google.gax import INITIAL_PAGE From da259a8ec77caa424d14d7e2f4f86b5a666cff50 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 1 Jul 2016 12:20:39 -0600 Subject: [PATCH 077/103] Remove CallOptions and missed commented code. --- gcloud/pubsub/_gax.py | 8 +------- gcloud/pubsub/test__gax.py | 8 ++++---- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/gcloud/pubsub/_gax.py b/gcloud/pubsub/_gax.py index ce105f69fc5a..0639833feb73 100644 --- a/gcloud/pubsub/_gax.py +++ b/gcloud/pubsub/_gax.py @@ -162,22 +162,16 @@ def topic_publish(self, topic_path, messages): :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ - options = CallOptions(is_bundling=True) message_pbs = [_message_pb_from_dict(message) for message in messages] try: - # result = self._gax_api.publish(topic_path, message_pbs, - # options=options) - - event = self._gax_api.publish(topic_path, message_pbs, - options=options) + event = self._gax_api.publish(topic_path, message_pbs) if not event.is_set(): event.wait() except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise - # return result.message_ids return event.result.message_ids def topic_list_subscriptions(self, topic_path, page_size=0, diff --git a/gcloud/pubsub/test__gax.py b/gcloud/pubsub/test__gax.py index 98d4d4e7bbcd..d285cb6e3260 100644 --- a/gcloud/pubsub/test__gax.py +++ b/gcloud/pubsub/test__gax.py @@ -223,7 +223,7 @@ def test_topic_publish_hit(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options.is_bundling, True) + self.assertEqual(options, None) def test_topic_publish_hit_with_wait(self): import base64 @@ -245,7 +245,7 @@ def test_topic_publish_hit_with_wait(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options.is_bundling, True) + self.assertEqual(options, None) def test_topic_publish_miss_w_attrs_w_bytes_payload(self): import base64 @@ -264,7 +264,7 @@ def test_topic_publish_miss_w_attrs_w_bytes_payload(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {'foo': 'bar'}) - self.assertEqual(options.is_bundling, True) + self.assertEqual(options, None) def test_topic_publish_error(self): import base64 @@ -283,7 +283,7 @@ def test_topic_publish_error(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options.is_bundling, True) + self.assertEqual(options, None) def test_topic_list_subscriptions_no_paging(self): from google.gax import INITIAL_PAGE From dedbaf1972934d85ca660284225d7626c4d36314 Mon Sep 17 
00:00:00 2001 From: Tres Seaver Date: Fri, 1 Jul 2016 14:32:30 -0400 Subject: [PATCH 078/103] Pass project path to GAX 'list_sinks'/'list_log_metrics'. --- gcloud/logging/_gax.py | 6 ++++-- gcloud/logging/test__gax.py | 8 ++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py index 52ec001e088f..d8e3a033b1d0 100644 --- a/gcloud/logging/_gax.py +++ b/gcloud/logging/_gax.py @@ -154,7 +154,8 @@ def list_sinks(self, project, page_size=0, page_token=None): with another call (pass that value as ``page_token``). """ options = _build_paging_options(page_token) - page_iter = self._gax_api.list_sinks(project, page_size, options) + path = 'projects/%s' % (project,) + page_iter = self._gax_api.list_sinks(path, page_size, options) sinks = [_log_sink_pb_to_mapping(log_sink_pb) for log_sink_pb in page_iter.next()] token = page_iter.page_token or None @@ -289,7 +290,8 @@ def list_metrics(self, project, page_size=0, page_token=None): with another call (pass that value as ``page_token``). """ options = _build_paging_options(page_token) - page_iter = self._gax_api.list_log_metrics(project, page_size, options) + path = 'projects/%s' % (project,) + page_iter = self._gax_api.list_log_metrics(path, page_size, options) metrics = [_log_metric_pb_to_mapping(log_metric_pb) for log_metric_pb in page_iter.next()] token = page_iter.page_token or None diff --git a/gcloud/logging/test__gax.py b/gcloud/logging/test__gax.py index 3174771d5989..c6c4adc70353 100644 --- a/gcloud/logging/test__gax.py +++ b/gcloud/logging/test__gax.py @@ -439,7 +439,7 @@ def test_list_sinks_no_paging(self): self.assertEqual(token, TOKEN) project, page_size, options = gax_api._list_sinks_called_with - self.assertEqual(project, self.PROJECT) + self.assertEqual(project, self.PROJECT_PATH) self.assertEqual(page_size, 0) self.assertEqual(options.page_token, INITIAL_PAGE) @@ -465,7 +465,7 @@ def test_list_sinks_w_paging(self): self.assertEqual(token, None) project, page_size, options = gax_api._list_sinks_called_with - self.assertEqual(project, self.PROJECT) + self.assertEqual(project, self.PROJECT_PATH) self.assertEqual(page_size, PAGE_SIZE) self.assertEqual(options.page_token, TOKEN) @@ -643,7 +643,7 @@ def test_list_metrics_no_paging(self): self.assertEqual(token, TOKEN) project, page_size, options = gax_api._list_log_metrics_called_with - self.assertEqual(project, self.PROJECT) + self.assertEqual(project, self.PROJECT_PATH) self.assertEqual(page_size, 0) self.assertEqual(options.page_token, INITIAL_PAGE) @@ -669,7 +669,7 @@ def test_list_metrics_w_paging(self): self.assertEqual(token, None) project, page_size, options = gax_api._list_log_metrics_called_with - self.assertEqual(project, self.PROJECT) + self.assertEqual(project, self.PROJECT_PATH) self.assertEqual(page_size, PAGE_SIZE) self.assertEqual(options.page_token, TOKEN) From 5646bd568fac56985f1d52b74141f6da516dc801 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 1 Jul 2016 15:31:17 -0400 Subject: [PATCH 079/103] Moar impedance mapping: JSON camelCase vs. proto with_underscore. 
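The generated protobuf HttpRequest message uses snake_case field names,
but the rest of the library (and the REST/JSON surface of the API)
expects camelCase keys in the 'httpRequest' mapping. Roughly, a listed
entry now carries a mapping shaped like this (the values are made up for
illustration):

    http_request = {
        'requestMethod': 'POST',
        'requestUrl': 'https://api.example.com/endpoint',
        'status': 500,
        'userAgent': 'Mozilla/5.0',
        'cacheHit': False,
    }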
--- gcloud/logging/_gax.py | 14 +++++++------- gcloud/logging/test__gax.py | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py index d8e3a033b1d0..074b34370dc4 100644 --- a/gcloud/logging/_gax.py +++ b/gcloud/logging/_gax.py @@ -440,15 +440,15 @@ def _log_entry_pb_to_mapping(entry_pb): if entry_pb.http_request: request = entry_pb.http_request mapping['httpRequest'] = { - 'request_method': request.request_method, - 'request_url': request.request_url, + 'requestMethod': request.request_method, + 'requestUrl': request.request_url, 'status': request.status, 'referer': request.referer, - 'user_agent': request.user_agent, - 'cache_hit': request.cache_hit, - 'request_size': request.request_size, - 'response_size': request.response_size, - 'remote_ip': request.remote_ip, + 'userAgent': request.user_agent, + 'cacheHit': request.cache_hit, + 'requestSize': request.request_size, + 'responseSize': request.response_size, + 'remoteIp': request.remote_ip, } if entry_pb.operation: diff --git a/gcloud/logging/test__gax.py b/gcloud/logging/test__gax.py index c6c4adc70353..0b3688c31726 100644 --- a/gcloud/logging/test__gax.py +++ b/gcloud/logging/test__gax.py @@ -154,15 +154,15 @@ def test_list_entries_with_extra_properties(self): self.assertEqual(entry['insertId'], IID) self.assertEqual(entry['timestamp'], _datetime_to_rfc3339(NOW)) EXPECTED_REQUEST = { - 'request_method': request.request_method, - 'request_url': request.request_url, + 'requestMethod': request.request_method, + 'requestUrl': request.request_url, 'status': request.status, - 'request_size': request.request_size, - 'response_size': request.response_size, + 'requestSize': request.request_size, + 'responseSize': request.response_size, 'referer': request.referer, - 'user_agent': request.user_agent, - 'remote_ip': request.remote_ip, - 'cache_hit': request.cache_hit, + 'userAgent': request.user_agent, + 'remoteIp': request.remote_ip, + 'cacheHit': request.cache_hit, } self.assertEqual(entry['httpRequest'], EXPECTED_REQUEST) EXPECTED_OPERATION = { From 3a3613e18d7c6dad9da4bf7a56d45808571e387d Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 1 Jul 2016 16:45:55 -0400 Subject: [PATCH 080/103] Add backoff to 'list_entries' calls for eventual consistency. Use integer for HTTP status code (the gRPC version cares). --- system_tests/logging_.py | 63 ++++++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/system_tests/logging_.py b/system_tests/logging_.py index db29f854d4d0..683eca20bece 100644 --- a/system_tests/logging_.py +++ b/system_tests/logging_.py @@ -33,6 +33,32 @@ TOPIC_NAME = 'gcloud-python-system-testing%s' % (_RESOURCE_ID,) +def _retry_backoff(result_predicate, meth, *args, **kw): + from grpc.beta.interfaces import StatusCode + from grpc.framework.interfaces.face.face import AbortionError + backoff_intervals = [1, 2, 4, 8] + while True: + try: + result = meth(*args, **kw) + except AbortionError as error: + if error.code != StatusCode.UNAVAILABLE: + raise + if backoff_intervals: + time.sleep(backoff_intervals.pop(0)) + else: + raise + if result_predicate(result): + return result + if backoff_intervals: + time.sleep(backoff_intervals.pop(0)) + else: + raise RuntimeError('%s: %s %s' % (meth, args, kw)) + + +def _has_entries(result): + return len(result[0]) > 0 + + class Config(object): """Run-time configuration to be modified at set-up. 
@@ -75,8 +101,7 @@ def test_log_text(self): logger = Config.CLIENT.logger(self._logger_name()) self.to_delete.append(logger) logger.log_text(TEXT_PAYLOAD) - time.sleep(2) - entries, _ = logger.list_entries() + entries, _ = _retry_backoff(_has_entries, logger.list_entries) self.assertEqual(len(entries), 1) self.assertEqual(entries[0].payload, TEXT_PAYLOAD) @@ -86,7 +111,7 @@ def test_log_text_w_metadata(self): SEVERITY = 'INFO' METHOD = 'POST' URI = 'https://api.example.com/endpoint' - STATUS = '500' + STATUS = 500 REQUEST = { 'requestMethod': METHOD, 'requestUrl': URI, @@ -94,18 +119,22 @@ def test_log_text_w_metadata(self): } logger = Config.CLIENT.logger(self._logger_name()) self.to_delete.append(logger) + logger.log_text(TEXT_PAYLOAD, insert_id=INSERT_ID, severity=SEVERITY, http_request=REQUEST) - time.sleep(2) - entries, _ = logger.list_entries() + entries, _ = _retry_backoff(_has_entries, logger.list_entries) + self.assertEqual(len(entries), 1) - self.assertEqual(entries[0].payload, TEXT_PAYLOAD) - self.assertEqual(entries[0].insert_id, INSERT_ID) - self.assertEqual(entries[0].severity, SEVERITY) - request = entries[0].http_request + + entry = entries[0] + self.assertEqual(entry.payload, TEXT_PAYLOAD) + self.assertEqual(entry.insert_id, INSERT_ID) + self.assertEqual(entry.severity, SEVERITY) + + request = entry.http_request self.assertEqual(request['requestMethod'], METHOD) self.assertEqual(request['requestUrl'], URI) - self.assertEqual(request['status'], int(STATUS)) + self.assertEqual(request['status'], STATUS) def test_log_struct(self): JSON_PAYLOAD = { @@ -114,9 +143,10 @@ def test_log_struct(self): } logger = Config.CLIENT.logger(self._logger_name()) self.to_delete.append(logger) + logger.log_struct(JSON_PAYLOAD) - time.sleep(2) - entries, _ = logger.list_entries() + entries, _ = _retry_backoff(_has_entries, logger.list_entries) + self.assertEqual(len(entries), 1) self.assertEqual(entries[0].payload, JSON_PAYLOAD) @@ -129,7 +159,7 @@ def test_log_struct_w_metadata(self): SEVERITY = 'INFO' METHOD = 'POST' URI = 'https://api.example.com/endpoint' - STATUS = '500' + STATUS = 500 REQUEST = { 'requestMethod': METHOD, 'requestUrl': URI, @@ -137,10 +167,11 @@ def test_log_struct_w_metadata(self): } logger = Config.CLIENT.logger(self._logger_name()) self.to_delete.append(logger) + logger.log_struct(JSON_PAYLOAD, insert_id=INSERT_ID, severity=SEVERITY, http_request=REQUEST) - time.sleep(2) - entries, _ = logger.list_entries() + entries, _ = _retry_backoff(_has_entries, logger.list_entries) + self.assertEqual(len(entries), 1) self.assertEqual(entries[0].payload, JSON_PAYLOAD) self.assertEqual(entries[0].insert_id, INSERT_ID) @@ -148,7 +179,7 @@ def test_log_struct_w_metadata(self): request = entries[0].http_request self.assertEqual(request['requestMethod'], METHOD) self.assertEqual(request['requestUrl'], URI) - self.assertEqual(request['status'], int(STATUS)) + self.assertEqual(request['status'], STATUS) def test_create_metric(self): metric = Config.CLIENT.metric( From d662dd9ae50a9f9ed564eb3e8c98f653b71a73a0 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 1 Jul 2016 16:47:58 -0400 Subject: [PATCH 081/103] Properly unpack JSON payload in GAX 'list_entries'. 
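A struct-valued json_payload was previously passed through as the raw
protobuf Struct; it is now unpacked into a plain dict. A small example
of the conversion the new helpers perform, built the same way the tests
below build their fixtures (field names and values are illustrative):

    from google.protobuf.struct_pb2 import Struct

    from gcloud.logging._gax import _struct_pb_to_mapping

    struct_pb = Struct()
    struct_pb.fields['message'].string_value = 'MESSAGE'
    struct_pb.fields['weather'].string_value = 'sunny'
    struct_pb.fields['retries'].number_value = 3

    _struct_pb_to_mapping(struct_pb)
    # -> {'message': 'MESSAGE', 'weather': 'sunny', 'retries': 3.0}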
--- gcloud/logging/_gax.py | 46 ++++++++++++++++++-- gcloud/logging/test__gax.py | 83 ++++++++++++++++++++++++++++++++++++- 2 files changed, 125 insertions(+), 4 deletions(-) diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py index 074b34370dc4..9bf32b2a4695 100644 --- a/gcloud/logging/_gax.py +++ b/gcloud/logging/_gax.py @@ -418,6 +418,41 @@ def _pb_timestamp_to_rfc3339(timestamp_pb): return _datetime_to_rfc3339(timestamp) +def _value_pb_to_value(value_pb): + """Helper for :func:`_log_entry_pb_to_mapping`.""" + kind = value_pb.WhichOneof('kind') + + if kind is None: + result = None + + elif kind == 'string_value': + result = value_pb.string_value + + elif kind == 'bool_value': + result = value_pb.bool_value + + elif kind == 'number_value': + result = value_pb.number_value + + elif kind == 'list_value': + result = [_value_pb_to_value(element) + for element in value_pb.list_value.values] + + elif kind == 'struct_value': + result = _struct_pb_to_mapping(value_pb.struct_value) + + else: + raise ValueError('Value protobuf had unknown kind: %s' % (kind,)) + + return result + + +def _struct_pb_to_mapping(struct_pb): + """Helper for :func:`_log_entry_pb_to_mapping`.""" + return dict([(key, _value_pb_to_value(struct_pb.fields[key])) + for key in struct_pb.fields]) + + def _log_entry_pb_to_mapping(entry_pb): """Helper for :meth:`list_entries`, et aliae @@ -432,10 +467,15 @@ def _log_entry_pb_to_mapping(entry_pb): 'insertId': entry_pb.insert_id, 'timestamp': _pb_timestamp_to_rfc3339(entry_pb.timestamp), 'labels': entry_pb.labels, - 'textPayload': entry_pb.text_payload, - 'jsonPayload': entry_pb.json_payload, - 'protoPayload': entry_pb.proto_payload, } + if entry_pb.HasField('text_payload'): + mapping['textPayload'] = entry_pb.text_payload + + if entry_pb.HasField('json_payload'): + mapping['jsonPayload'] = _struct_pb_to_mapping(entry_pb.json_payload) + + if entry_pb.HasField('proto_payload'): + mapping['protoPayload'] = entry_pb.proto_payload if entry_pb.http_request: request = entry_pb.http_request diff --git a/gcloud/logging/test__gax.py b/gcloud/logging/test__gax.py index 0b3688c31726..a87b8b7e9682 100644 --- a/gcloud/logging/test__gax.py +++ b/gcloud/logging/test__gax.py @@ -78,13 +78,16 @@ def test_list_entries_no_paging(self): self.assertTrue(options.page_token is INITIAL_PAGE) def test_list_entries_with_paging(self): + from google.protobuf.struct_pb2 import Value from gcloud._testing import _GAXPageIterator SIZE = 23 TOKEN = 'TOKEN' NEW_TOKEN = 'NEW_TOKEN' PAYLOAD = {'message': 'MESSAGE', 'weather': 'sunny'} + struct_pb = _StructPB(dict([(key, Value(string_value=value)) + for key, value in PAYLOAD.items()])) response = _GAXPageIterator( - [_LogEntryPB(self.LOG_NAME, json_payload=PAYLOAD)], NEW_TOKEN) + [_LogEntryPB(self.LOG_NAME, json_payload=struct_pb)], NEW_TOKEN) gax_api = _GAXLoggingAPI(_list_log_entries_response=response) api = self._makeOne(gax_api) @@ -811,6 +814,75 @@ def test_metric_delete_hit(self): self.assertEqual(options, None) +@unittest2.skipUnless(_HAVE_GAX, 'No gax-python') +class Test_value_pb_to_value(_Base, unittest2.TestCase): + + def _callFUT(self, value_pb): + from gcloud.logging._gax import _value_pb_to_value + return _value_pb_to_value(value_pb) + + def test_w_null_values(self): + from google.protobuf.struct_pb2 import Value + value_pb = Value() + self.assertEqual(self._callFUT(value_pb), None) + value_pb = Value(null_value=None) + self.assertEqual(self._callFUT(value_pb), None) + + def test_w_string_value(self): + from google.protobuf.struct_pb2 import 
Value + STRING = 'STRING' + value_pb = Value(string_value=STRING) + self.assertEqual(self._callFUT(value_pb), STRING) + + def test_w_bool_values(self): + from google.protobuf.struct_pb2 import Value + true_value_pb = Value(bool_value=True) + self.assertTrue(self._callFUT(true_value_pb) is True) + false_value_pb = Value(bool_value=False) + self.assertTrue(self._callFUT(false_value_pb) is False) + + def test_w_number_values(self): + from google.protobuf.struct_pb2 import Value + ANSWER = 42 + PI = 3.1415926 + int_value_pb = Value(number_value=ANSWER) + self.assertEqual(self._callFUT(int_value_pb), ANSWER) + float_value_pb = Value(number_value=PI) + self.assertEqual(self._callFUT(float_value_pb), PI) + + def test_w_list_value(self): + from google.protobuf.struct_pb2 import Value + STRING = 'STRING' + PI = 3.1415926 + value_pb = Value() + value_pb.list_value.values.add(string_value=STRING) + value_pb.list_value.values.add(bool_value=True) + value_pb.list_value.values.add(number_value=PI) + self.assertEqual(self._callFUT(value_pb), [STRING, True, PI]) + + def test_w_struct_value(self): + from google.protobuf.struct_pb2 import Value + STRING = 'STRING' + PI = 3.1415926 + value_pb = Value() + value_pb.struct_value.fields['string'].string_value = STRING + value_pb.struct_value.fields['bool'].bool_value = True + value_pb.struct_value.fields['number'].number_value = PI + self.assertEqual(self._callFUT(value_pb), + {'string': STRING, 'bool': True, 'number': PI}) + + def test_w_unknown_kind(self): + + class _Value(object): + + def WhichOneof(self, name): + assert name == 'kind' + return 'UNKNOWN' + + with self.assertRaises(ValueError): + self._callFUT(_Value()) + + class _GAXBaseAPI(object): _random_gax_error = False @@ -974,6 +1046,12 @@ def __init__(self, type_='global', **labels): self.labels = labels +class _StructPB(object): + + def __init__(self, fields): + self.fields = fields + + class _LogEntryPB(object): severity = 'DEFAULT' @@ -987,6 +1065,9 @@ def __init__(self, log_name, **kw): self.labels = kw.pop('labels', {}) self.__dict__.update(kw) + def HasField(self, field_name): + return getattr(self, field_name, None) is not None + @staticmethod def _make_timestamp(): from datetime import datetime From ac6bd5c8f85c5f7a93c712d472ebe311a5a89f48 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 1 Jul 2016 17:12:25 -0400 Subject: [PATCH 082/103] Impedance matching: JSON severity names vs. protobuf numbers. 
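The protobuf entry carries severity as an enum number, while the JSON
API (and callers of this library) use the string name; LogSeverity.Name
does the translation. A quick illustration, assuming the generated enum
wrapper from log_severity_pb2:

    from google.logging.type.log_severity_pb2 import LogSeverity, WARNING

    LogSeverity.Name(WARNING)   # -> 'WARNING'
    LogSeverity.Name(0)         # -> 'DEFAULT'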
--- gcloud/logging/_gax.py | 2 +- gcloud/logging/test__gax.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py index 9bf32b2a4695..3f34645f3588 100644 --- a/gcloud/logging/_gax.py +++ b/gcloud/logging/_gax.py @@ -463,7 +463,7 @@ def _log_entry_pb_to_mapping(entry_pb): mapping = { 'logName': entry_pb.log_name, 'resource': _mon_resource_pb_to_mapping(entry_pb.resource), - 'severity': entry_pb.severity, + 'severity': LogSeverity.Name(entry_pb.severity), 'insertId': entry_pb.insert_id, 'timestamp': _pb_timestamp_to_rfc3339(entry_pb.timestamp), 'labels': entry_pb.labels, diff --git a/gcloud/logging/test__gax.py b/gcloud/logging/test__gax.py index a87b8b7e9682..d68082109cde 100644 --- a/gcloud/logging/test__gax.py +++ b/gcloud/logging/test__gax.py @@ -112,6 +112,7 @@ def test_list_entries_with_paging(self): def test_list_entries_with_extra_properties(self): from datetime import datetime + from google.logging.type.log_severity_pb2 import WARNING from gcloud._testing import _GAXPageIterator from gcloud._helpers import UTC from gcloud._helpers import _datetime_to_rfc3339 @@ -129,7 +130,7 @@ def test_list_entries_with_extra_properties(self): request = _HTTPRequestPB() operation = _LogEntryOperationPB() EXTRAS = { - 'severity': SEVERITY, + 'severity': WARNING, 'labels': LABELS, 'insert_id': IID, 'http_request': request, @@ -1054,7 +1055,7 @@ def __init__(self, fields): class _LogEntryPB(object): - severity = 'DEFAULT' + severity = 0 http_request = operation = insert_id = None text_payload = json_payload = proto_payload = None From 54b39f79022954a19af47a82c860cce8ff9eee91 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 1 Jul 2016 18:58:26 -0400 Subject: [PATCH 083/103] Revert "Re-enable bundling (for consideration)" --- gcloud/_testing.py | 14 -------------- gcloud/pubsub/_gax.py | 8 ++++---- gcloud/pubsub/test__gax.py | 33 ++++----------------------------- 3 files changed, 8 insertions(+), 47 deletions(-) diff --git a/gcloud/_testing.py b/gcloud/_testing.py index 15ef5dd298e1..0a440e817436 100644 --- a/gcloud/_testing.py +++ b/gcloud/_testing.py @@ -59,17 +59,3 @@ def __init__(self, items, page_token): def next(self): items, self._items = self._items, None return items - - -class _GAXBundlingEvent(object): - - result = None - - def __init__(self, result): - self._result = result - - def is_set(self): - return self.result is not None - - def wait(self, *_): - self.result = self._result diff --git a/gcloud/pubsub/_gax.py b/gcloud/pubsub/_gax.py index 0639833feb73..28ac6c23e294 100644 --- a/gcloud/pubsub/_gax.py +++ b/gcloud/pubsub/_gax.py @@ -162,17 +162,17 @@ def topic_publish(self, topic_path, messages): :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ + options = CallOptions(is_bundling=False) message_pbs = [_message_pb_from_dict(message) for message in messages] try: - event = self._gax_api.publish(topic_path, message_pbs) - if not event.is_set(): - event.wait() + result = self._gax_api.publish(topic_path, message_pbs, + options=options) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise - return event.result.message_ids + return result.message_ids def topic_list_subscriptions(self, topic_path, page_size=0, page_token=None): diff --git a/gcloud/pubsub/test__gax.py b/gcloud/pubsub/test__gax.py index d285cb6e3260..2426d2dfb7e8 100644 --- a/gcloud/pubsub/test__gax.py +++ b/gcloud/pubsub/test__gax.py @@ -204,15 +204,12 @@ def 
test_topic_delete_error(self): def test_topic_publish_hit(self): import base64 - from gcloud._testing import _GAXBundlingEvent PAYLOAD = b'This is the message text' B64 = base64.b64encode(PAYLOAD).decode('ascii') MSGID = 'DEADBEEF' MESSAGE = {'data': B64, 'attributes': {}} response = _PublishResponsePB([MSGID]) - event = _GAXBundlingEvent(response) - event.wait() # already received result - gax_api = _GAXPublisherAPI(_publish_response=event) + gax_api = _GAXPublisherAPI(_publish_response=response) api = self._makeOne(gax_api) resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE]) @@ -223,29 +220,7 @@ def test_topic_publish_hit(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options, None) - - def test_topic_publish_hit_with_wait(self): - import base64 - from gcloud._testing import _GAXBundlingEvent - PAYLOAD = b'This is the message text' - B64 = base64.b64encode(PAYLOAD).decode('ascii') - MSGID = 'DEADBEEF' - MESSAGE = {'data': B64, 'attributes': {}} - response = _PublishResponsePB([MSGID]) - event = _GAXBundlingEvent(response) - gax_api = _GAXPublisherAPI(_publish_response=event) - api = self._makeOne(gax_api) - - resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE]) - - self.assertEqual(resource, [MSGID]) - topic_path, message_pbs, options = gax_api._publish_called_with - self.assertEqual(topic_path, self.TOPIC_PATH) - message_pb, = message_pbs - self.assertEqual(message_pb.data, B64) - self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options, None) + self.assertEqual(options.is_bundling, False) def test_topic_publish_miss_w_attrs_w_bytes_payload(self): import base64 @@ -264,7 +239,7 @@ def test_topic_publish_miss_w_attrs_w_bytes_payload(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {'foo': 'bar'}) - self.assertEqual(options, None) + self.assertEqual(options.is_bundling, False) def test_topic_publish_error(self): import base64 @@ -283,7 +258,7 @@ def test_topic_publish_error(self): message_pb, = message_pbs self.assertEqual(message_pb.data, B64) self.assertEqual(message_pb.attributes, {}) - self.assertEqual(options, None) + self.assertEqual(options.is_bundling, False) def test_topic_list_subscriptions_no_paging(self): from google.gax import INITIAL_PAGE From 88243b084f787e051d2b8902c2657742d3d0d434 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 4 Jul 2016 09:22:40 -0600 Subject: [PATCH 084/103] Fix pubsub context manager exit error. 
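Exiting an empty ``with topic.batch():`` block triggered a publish API call
even though no messages were queued.  A minimal, self-contained sketch of the
guard this patch adds (the class below is a hypothetical stand-in for
``Batch``, not the real implementation):

    class _SketchBatch(object):
        def __init__(self):
            self.messages = []
            self.commits = 0

        def commit(self):
            self.commits += 1       # stands in for the publish API request

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is None and self.messages:   # skip commit when empty
                self.commit()

    with _SketchBatch() as batch:
        pass                        # nothing published inside the block

    assert batch.commits == 0       # exiting made no API call
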
closes #1955 --- gcloud/pubsub/test_topic.py | 14 ++++++++++++++ gcloud/pubsub/topic.py | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/gcloud/pubsub/test_topic.py b/gcloud/pubsub/test_topic.py index 7bce0e3c3198..85f8d7ad7ceb 100644 --- a/gcloud/pubsub/test_topic.py +++ b/gcloud/pubsub/test_topic.py @@ -228,6 +228,18 @@ def test_publish_multiple_w_bound_client(self): self.assertEqual(api._topic_published, (self.TOPIC_PATH, [MESSAGE1, MESSAGE2])) + def test_publish_w_no_messages(self): + client = _Client(project=self.PROJECT) + api = client.publisher_api = _FauxPublisherAPI() + api._topic_publish_response = [] + topic = self._makeOne(self.TOPIC_NAME, client=client) + + with topic.batch() as batch: + pass + + self.assertEqual(list(batch.messages), []) + self.assertEqual(api._api_called, 0) + def test_publish_multiple_w_alternate_client(self): import base64 PAYLOAD1 = b'This is the first message text' @@ -716,6 +728,7 @@ def test_context_mgr_failure(self): class _FauxPublisherAPI(object): + _api_called = 0 def topic_create(self, topic_path): self._topic_created = topic_path @@ -735,6 +748,7 @@ def topic_delete(self, topic_path): def topic_publish(self, topic_path, messages): self._topic_published = topic_path, messages + self._api_called += 1 return self._topic_publish_response def topic_list_subscriptions(self, topic_path, page_size=None, diff --git a/gcloud/pubsub/topic.py b/gcloud/pubsub/topic.py index 6169ada84911..4ebf6a628216 100644 --- a/gcloud/pubsub/topic.py +++ b/gcloud/pubsub/topic.py @@ -416,7 +416,7 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is None: + if exc_type is None and len(self.messages): self.commit() def __iter__(self): From d49b048cb2938a3ad0a17df115b6f1d6b75e50f9 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 4 Jul 2016 09:38:48 -0600 Subject: [PATCH 085/103] Move messages check to commit() --- gcloud/pubsub/topic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/gcloud/pubsub/topic.py b/gcloud/pubsub/topic.py index 4ebf6a628216..568434789ac4 100644 --- a/gcloud/pubsub/topic.py +++ b/gcloud/pubsub/topic.py @@ -416,7 +416,7 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is None and len(self.messages): + if exc_type is None: self.commit() def __iter__(self): @@ -443,6 +443,9 @@ def commit(self, client=None): :param client: the client to use. If not passed, falls back to the ``client`` stored on the current batch. """ + if not self.messages: + return + if client is None: client = self.client api = client.publisher_api From c28f3de5711a6b592e197c4d6885446db405259c Mon Sep 17 00:00:00 2001 From: JJ Geewax Date: Tue, 5 Jul 2016 12:08:22 -0400 Subject: [PATCH 086/103] Fix #1957 - Typo in docs /cc @bhzunami --- docs/resource-manager-api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/resource-manager-api.rst b/docs/resource-manager-api.rst index fca738c62513..6caf53e93b1d 100644 --- a/docs/resource-manager-api.rst +++ b/docs/resource-manager-api.rst @@ -29,7 +29,7 @@ Here's a quick example of the full life-cycle: >>> # Create a new project >>> new_project = client.new_project('your-project-id-here', - ... name='My new project) + ... 
name='My new project') >>> new_project.create() >>> # Update an existing project From de1dd846bca2a44f8761e983953ac290c1fbfb33 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 6 Jul 2016 11:46:54 -0600 Subject: [PATCH 087/103] Update references to cloud logging. --- README.rst | 8 ++++---- docs/index.rst | 2 +- docs/logging-client.rst | 3 +-- gcloud/logging/__init__.py | 2 +- gcloud/logging/client.py | 2 +- gcloud/logging/connection.py | 6 +++--- gcloud/logging/entries.py | 2 +- gcloud/logging/metric.py | 2 +- gcloud/logging/sink.py | 2 +- system_tests/logging_.py | 6 +++--- 10 files changed, 17 insertions(+), 18 deletions(-) diff --git a/README.rst b/README.rst index e062737f9cc9..cb43d767c5ec 100644 --- a/README.rst +++ b/README.rst @@ -20,14 +20,14 @@ This client supports the following Google Cloud Platform services: - `Google Cloud Pub/Sub`_ - `Google BigQuery`_ - `Google Cloud Resource Manager`_ -- `Google Cloud Logging`_ +- `Google Stackdriver Logging`_ .. _Google Cloud Datastore: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-datastore .. _Google Cloud Storage: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-storage .. _Google Cloud Pub/Sub: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-pubsub .. _Google BigQuery: https://github.com/GoogleCloudPlatform/gcloud-python#google-bigquery .. _Google Cloud Resource Manager: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-resource-manager -.. _Google Cloud Logging: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-logging +.. _Google Stackdriver Logging: https://github.com/GoogleCloudPlatform/gcloud-python#google-cloud-logging If you need support for other Google APIs, check out the `Google APIs Python Client library`_. @@ -239,7 +239,7 @@ manage projects using this Client Library. .. _Resource Manager documentation: https://googlecloudplatform.github.io/gcloud-python/stable/resource-manager-api.html -Google Cloud Logging +Google Stackdriver Logging -------------------- `Stackdriver Logging`_ API (`Logging API docs`_) allows you to store, search, @@ -264,7 +264,7 @@ Example of fetching entries: print entry.payload See the ``gcloud-python`` API `logging documentation`_ to learn how to connect -to Cloud logging using this Client Library. +to Stackdriver Logging using this Client Library. .. _logging documentation: https://googlecloudplatform.github.io/gcloud-python/stable/logging-usage.html diff --git a/docs/index.rst b/docs/index.rst index b263dba70531..56705eba9b15 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -102,7 +102,7 @@ .. toctree:: :maxdepth: 0 :hidden: - :caption: Cloud Logging + :caption: Stackdriver Logging logging-usage Client diff --git a/docs/logging-client.rst b/docs/logging-client.rst index 3b7db274a803..18ce6a3baf87 100644 --- a/docs/logging-client.rst +++ b/docs/logging-client.rst @@ -1,4 +1,4 @@ -Logging Client +Stackdriver Logging Client ============== .. automodule:: gcloud.logging.client @@ -11,4 +11,3 @@ Connection .. automodule:: gcloud.logging.connection :members: :show-inheritance: - diff --git a/gcloud/logging/__init__.py b/gcloud/logging/__init__.py index 67b0386329e9..578d1c70aa29 100644 --- a/gcloud/logging/__init__.py +++ b/gcloud/logging/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Google Cloud Logging API wrapper.""" +"""Google Stackdriver Logging API wrapper.""" from gcloud.logging.client import Client from gcloud.logging.connection import Connection diff --git a/gcloud/logging/client.py b/gcloud/logging/client.py index 8b4aae0bdf46..d662a3a2b349 100644 --- a/gcloud/logging/client.py +++ b/gcloud/logging/client.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Client for interacting with the Google Cloud Logging API.""" +"""Client for interacting with the Google Stackdriver Logging API.""" import os diff --git a/gcloud/logging/connection.py b/gcloud/logging/connection.py index 83e1eadcd74c..9f570cde398d 100644 --- a/gcloud/logging/connection.py +++ b/gcloud/logging/connection.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Create / interact with gcloud logging connections.""" +"""Create / interact with Stackdriver Logging connections.""" from gcloud import connection as base_connection class Connection(base_connection.JSONConnection): - """A connection to Google Cloud Logging via the JSON REST API. + """A connection to Google Stackdriver Logging via the JSON REST API. :type credentials: :class:`oauth2client.client.OAuth2Credentials` :param credentials: (Optional) The OAuth2 Credentials to use for this @@ -45,7 +45,7 @@ class Connection(base_connection.JSONConnection): 'https://www.googleapis.com/auth/logging.write', 'https://www.googleapis.com/auth/logging.admin', 'https://www.googleapis.com/auth/cloud-platform') - """The scopes required for authenticating as a Cloud Logging consumer.""" + """The scopes required for authenticating as a Stackdriver Logging consumer.""" class _LoggingAPI(object): diff --git a/gcloud/logging/entries.py b/gcloud/logging/entries.py index ed492b86e8c8..f8caaba3cdbd 100644 --- a/gcloud/logging/entries.py +++ b/gcloud/logging/entries.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Log entries within the Google Cloud Logging API.""" +"""Log entries within the Google Stackdriver Logging API.""" import json import re diff --git a/gcloud/logging/metric.py b/gcloud/logging/metric.py index b22ced4349ba..b05269e39ac5 100644 --- a/gcloud/logging/metric.py +++ b/gcloud/logging/metric.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Define Logging API Metrics.""" +"""Define Stackdriver Logging API Metrics.""" from gcloud.exceptions import NotFound diff --git a/gcloud/logging/sink.py b/gcloud/logging/sink.py index b59096713731..07a6dba2a0d0 100644 --- a/gcloud/logging/sink.py +++ b/gcloud/logging/sink.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Define Logging API Sinks.""" +"""Define Stackdriver Logging API Sinks.""" from gcloud.exceptions import NotFound diff --git a/system_tests/logging_.py b/system_tests/logging_.py index db29f854d4d0..958b0af9c03e 100644 --- a/system_tests/logging_.py +++ b/system_tests/logging_.py @@ -206,7 +206,7 @@ def _init_storage_bucket(self): BUCKET_URI = 'storage.googleapis.com/%s' % (BUCKET_NAME,) # Create the destination bucket, and set up the ACL to allow - # Cloud Logging to write into it. + # Stackdriver Logging to write into it. 
storage_client = storage.Client() bucket = storage_client.create_bucket(BUCKET_NAME) self.to_delete.append(bucket) @@ -231,7 +231,7 @@ def test_create_sink_pubsub_topic(self): from gcloud import pubsub # Create the destination topic, and set up the IAM policy to allow - # Cloud Logging to write into it. + # Stackdriver Logging to write into it. pubsub_client = pubsub.Client() topic = pubsub_client.topic(TOPIC_NAME) topic.create() @@ -256,7 +256,7 @@ def _init_bigquery_dataset(self): Config.CLIENT.project, DATASET_NAME,) # Create the destination dataset, and set up the ACL to allow - # Cloud Logging to write into it. + # Stackdriver Logging to write into it. bigquery_client = bigquery.Client() dataset = bigquery_client.dataset(DATASET_NAME) dataset.create() From 6ec956b00ebff337be92fe658f0c6710300e0158 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 6 Jul 2016 12:15:41 -0600 Subject: [PATCH 088/103] Update Monitoring references. --- docs/index.rst | 2 +- docs/logging-usage.rst | 2 +- docs/monitoring-client.rst | 3 +-- docs/monitoring-usage.rst | 12 ++++++------ gcloud/monitoring/__init__.py | 2 +- gcloud/monitoring/client.py | 5 +++-- gcloud/monitoring/connection.py | 4 ++-- gcloud/monitoring/metric.py | 4 ++-- gcloud/monitoring/query.py | 4 ++-- gcloud/monitoring/resource.py | 5 +++-- gcloud/monitoring/timeseries.py | 4 ++-- 11 files changed, 24 insertions(+), 23 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 56705eba9b15..0c2b6ffbedb9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -114,7 +114,7 @@ .. toctree:: :maxdepth: 0 :hidden: - :caption: Cloud Monitoring + :caption: Stackdriver Monitoring monitoring-usage Client diff --git a/docs/logging-usage.rst b/docs/logging-usage.rst index d0acc528f0f2..3dd2a6efab19 100644 --- a/docs/logging-usage.rst +++ b/docs/logging-usage.rst @@ -136,7 +136,7 @@ Manage log metrics ------------------ Metrics are counters of entries which match a given filter. They can be -used within Cloud Monitoring to create charts and alerts. +used within Stackdriver Monitoring to create charts and alerts. Create a metric: diff --git a/docs/monitoring-client.rst b/docs/monitoring-client.rst index abe1bdbac325..37e8bac8ed8b 100644 --- a/docs/monitoring-client.rst +++ b/docs/monitoring-client.rst @@ -1,4 +1,4 @@ -Monitoring Client +Stackdriver Monitoring Client ================= .. automodule:: gcloud.monitoring.client @@ -11,4 +11,3 @@ Connection .. automodule:: gcloud.monitoring.connection :members: :show-inheritance: - diff --git a/docs/monitoring-usage.rst b/docs/monitoring-usage.rst index 5fdfc8b3234c..adfd93cefe67 100644 --- a/docs/monitoring-usage.rst +++ b/docs/monitoring-usage.rst @@ -5,7 +5,7 @@ Using the API Introduction ------------ -With the Monitoring API, you can work with Stackdriver metric data +With the Stackdriver Monitoring API, you can work with Stackdriver metric data pertaining to monitored resources in Google Cloud Platform (GCP) or elsewhere. @@ -21,7 +21,7 @@ Essential concepts: - A **time series** is a collection of data points associated with points or intervals in time. -Please refer to the documentation for the `Monitoring API`_ for +Please refer to the documentation for the `Stackdriver Monitoring API`_ for more information. At present, this client library supports the following features @@ -32,13 +32,13 @@ of the API: - Creation and deletion of metric descriptors for custom metrics. - (Writing of custom metric data will be coming soon.) -.. _Monitoring API: https://cloud.google.com/monitoring/api/v3/ +.. 
_Stackdriver Monitoring API: https://cloud.google.com/monitoring/api/v3/ -The Monitoring Client Object ----------------------------- +The Stackdriver Monitoring Client Object +---------------------------------------- -The monitoring client library generally makes its +The Stackdriver Monitoring client library generally makes its functionality available as methods of the monitoring :class:`~gcloud.monitoring.client.Client` class. A :class:`~gcloud.monitoring.client.Client` instance holds diff --git a/gcloud/monitoring/__init__.py b/gcloud/monitoring/__init__.py index 31191d1660b0..26b92da74e40 100644 --- a/gcloud/monitoring/__init__.py +++ b/gcloud/monitoring/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Google Monitoring API wrapper.""" +"""Google Stackdriver Monitoring API wrapper.""" from gcloud.monitoring.client import Client from gcloud.monitoring.connection import Connection diff --git a/gcloud/monitoring/client.py b/gcloud/monitoring/client.py index fb7a06d0d0dd..25e729c5fc9c 100644 --- a/gcloud/monitoring/client.py +++ b/gcloud/monitoring/client.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Client for interacting with the `Google Monitoring API (V3)`_. +"""Client for interacting with the `Google Stackdriver Monitoring API (V3)`_. Example:: @@ -24,7 +24,8 @@ At present, the client supports querying of time series, metric descriptors, and monitored resource descriptors. -.. _Google Monitoring API (V3): https://cloud.google.com/monitoring/api/v3/ +.. _Google Stackdriver Monitoring API (V3): + https://cloud.google.com/monitoring/api/v3/ """ from gcloud.client import JSONClient diff --git a/gcloud/monitoring/connection.py b/gcloud/monitoring/connection.py index 5887da62e65d..c9f804e5e77e 100644 --- a/gcloud/monitoring/connection.py +++ b/gcloud/monitoring/connection.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Create / interact with gcloud monitoring connections.""" +"""Create / interact with Stackdriver Monitoring connections.""" from gcloud import connection as base_connection class Connection(base_connection.JSONConnection): - """A connection to Google Monitoring via the JSON REST API. + """A connection to Google Stackdriver Monitoring via the JSON REST API. :type credentials: :class:`oauth2client.client.OAuth2Credentials` :param credentials: (Optional) The OAuth2 Credentials to use for this diff --git a/gcloud/monitoring/metric.py b/gcloud/monitoring/metric.py index 87e2a18ac99c..ea8b93a442a2 100644 --- a/gcloud/monitoring/metric.py +++ b/gcloud/monitoring/metric.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Metric Descriptors for the `Google Monitoring API (V3)`_. +"""Metric Descriptors for the `Google Stackdriver Monitoring API (V3)`_. -.. _Google Monitoring API (V3): +.. _Google Stackdriver Monitoring API (V3): https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ projects.metricDescriptors """ diff --git a/gcloud/monitoring/query.py b/gcloud/monitoring/query.py index 91838b6b52eb..44e7f917de87 100644 --- a/gcloud/monitoring/query.py +++ b/gcloud/monitoring/query.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Time series query for the `Google Monitoring API (V3)`_. 
+"""Time series query for the `Google Stackdriver Monitoring API (V3)`_. -.. _Google Monitoring API (V3): +.. _Google Stackdriver Monitoring API (V3): https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ projects.timeSeries/list """ diff --git a/gcloud/monitoring/resource.py b/gcloud/monitoring/resource.py index a992ff9f1312..b4391f28e757 100644 --- a/gcloud/monitoring/resource.py +++ b/gcloud/monitoring/resource.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Monitored Resource Descriptors for the `Google Monitoring API (V3)`_. +"""Monitored Resource Descriptors for the +`Google Stackdriver Monitoring API (V3)`_. -.. _Google Monitoring API (V3): +.. _Google Stackdriver Monitoring API (V3): https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ projects.monitoredResourceDescriptors """ diff --git a/gcloud/monitoring/timeseries.py b/gcloud/monitoring/timeseries.py index 1b7e19f4a30c..0b4f98189d8c 100644 --- a/gcloud/monitoring/timeseries.py +++ b/gcloud/monitoring/timeseries.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Time series for the `Google Monitoring API (V3)`_. +"""Time series for the `Google Stackdriver Monitoring API (V3)`_. Features intentionally omitted from this first version of the client library: * Writing time series. * Natural representation of distribution values. -.. _Google Monitoring API (V3): +.. _Google Stackdriver Monitoring API (V3): https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries """ From 6ffe88dc6d89575ad94816675ec0f70285204b77 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 6 Jul 2016 14:26:22 -0400 Subject: [PATCH 089/103] Skip remainder of backoff loop on failure. --- system_tests/logging_.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system_tests/logging_.py b/system_tests/logging_.py index 683eca20bece..5f9fb207974a 100644 --- a/system_tests/logging_.py +++ b/system_tests/logging_.py @@ -45,6 +45,7 @@ def _retry_backoff(result_predicate, meth, *args, **kw): raise if backoff_intervals: time.sleep(backoff_intervals.pop(0)) + continue else: raise if result_predicate(result): From a2f4715348164cb1efea407ffa613377a34bacd1 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 6 Jul 2016 12:19:08 -0600 Subject: [PATCH 090/103] Fix sphinx warnings for headers. --- README.rst | 2 +- docs/logging-client.rst | 2 +- docs/monitoring-client.rst | 2 +- gcloud/logging/connection.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index cb43d767c5ec..3410150b7177 100644 --- a/README.rst +++ b/README.rst @@ -240,7 +240,7 @@ manage projects using this Client Library. .. _Resource Manager documentation: https://googlecloudplatform.github.io/gcloud-python/stable/resource-manager-api.html Google Stackdriver Logging --------------------- +-------------------------- `Stackdriver Logging`_ API (`Logging API docs`_) allows you to store, search, analyze, monitor, and alert on log data and events from Google Cloud Platform. diff --git a/docs/logging-client.rst b/docs/logging-client.rst index 18ce6a3baf87..fb5f009947cd 100644 --- a/docs/logging-client.rst +++ b/docs/logging-client.rst @@ -1,5 +1,5 @@ Stackdriver Logging Client -============== +========================== .. 
automodule:: gcloud.logging.client :members: diff --git a/docs/monitoring-client.rst b/docs/monitoring-client.rst index 37e8bac8ed8b..baea7d2fd592 100644 --- a/docs/monitoring-client.rst +++ b/docs/monitoring-client.rst @@ -1,5 +1,5 @@ Stackdriver Monitoring Client -================= +============================= .. automodule:: gcloud.monitoring.client :members: diff --git a/gcloud/logging/connection.py b/gcloud/logging/connection.py index 9f570cde398d..fa60e181b3fd 100644 --- a/gcloud/logging/connection.py +++ b/gcloud/logging/connection.py @@ -45,7 +45,7 @@ class Connection(base_connection.JSONConnection): 'https://www.googleapis.com/auth/logging.write', 'https://www.googleapis.com/auth/logging.admin', 'https://www.googleapis.com/auth/cloud-platform') - """The scopes required for authenticating as a Stackdriver Logging consumer.""" + """The scopes required for authenticating as a Logging consumer.""" class _LoggingAPI(object): From ff737acbab6bd4d28677a5c8d8df486e191befbc Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 8 Jul 2016 14:21:03 -0400 Subject: [PATCH 091/103] Attempt to un-break Appveyor: - Put '_testing' on PYTHONPATH only for Py3k (debug grpico-on-Py3k issues later). - Use 'setup.py develop' rather than 'setup.py build' to get dependencies installed. - Run 'pip list' afterwards to show what got installed. Toward #1863. --- appveyor.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index d9f8183aa73c..beecafaab769 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -30,10 +30,14 @@ environment: - PYTHON: "C:\\Python34" PYTHON_VERSION: "3.4.4" PYTHON_ARCH: "32" + # Use mocked-up GRPC for now + GRPC_PATH: "_testing" - PYTHON: "C:\\Python34-x64" PYTHON_VERSION: "3.4.4" PYTHON_ARCH: "64" + # Use mocked-up GRPC for now + GRPC_PATH: "_testing" # Python 3.5.1 is the latest Python 3.5 with a Windows installer # Python 3.5.1 is the overall latest @@ -41,10 +45,14 @@ environment: - PYTHON: "C:\\Python35" PYTHON_VERSION: "3.5.1" PYTHON_ARCH: "32" + # Use mocked-up GRPC for now + GRPC_PATH: "_testing" - PYTHON: "C:\\Python35-x64" PYTHON_VERSION: "3.5.1" PYTHON_ARCH: "64" + # Use mocked-up GRPC for now + GRPC_PATH: "_testing" install: - ECHO "Filesystem root:" @@ -78,10 +86,12 @@ install: build_script: # Build the compiled extension - - "%CMD_IN_ENV% python setup.py build" + #- "%CMD_IN_ENV% python setup.py build" + - "%CMD_IN_ENV% python setup.py develop" + - "%CMD_IN_ENV% pip list" test_script: - - "set PYTHONPATH=_testing" + - "set PYTHONPATH=%GRPC_PATH%" # Run the project tests - "%CMD_IN_ENV% python setup.py nosetests" From e20df5df7a1151cee43b18664a651391fdceb81f Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 8 Jul 2016 15:32:31 -0400 Subject: [PATCH 092/103] Update disabled messages from new pylint. --- scripts/run_pylint.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index 7fa7662efb07..48a393153368 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -51,6 +51,8 @@ 'import-error', 'invalid-name', 'missing-docstring', + 'missing-raises-doc', + 'missing-returns-doc', 'no-init', 'no-self-use', 'superfluous-parens', From 028ea3c29c8d6b16e00c0d41b7a0736fcb7f8cea Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 8 Jul 2016 14:33:09 -0400 Subject: [PATCH 093/103] Change error for generating signed url. 
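Credentials built from a bare access token (for example the default GCE
credentials) have no ``sign_blob`` method, so they cannot produce signed
URLs; the new check surfaces that as an early ``AttributeError`` pointing at
the service-account setup docs.  A hedged sketch of what callers now see,
mirroring the new unit test below (the credentials class is a hypothetical
stand-in):

    import time
    from gcloud.credentials import generate_signed_url

    class _TokenOnlyCredentials(object):        # no sign_blob available
        service_account_email = 'robot@example.com'

    try:
        generate_signed_url(_TokenOnlyCredentials(),
                            resource='/bucket/object-name',
                            expiration=int(time.time() + 300))
    except AttributeError as exc:
        print(exc)   # explains that a private key / service account is needed
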
--- gcloud/credentials.py | 12 ++++++++++++ gcloud/test_credentials.py | 20 ++++++++++++++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/gcloud/credentials.py b/gcloud/credentials.py index 3d95733285c6..e6d78b9cb375 100644 --- a/gcloud/credentials.py +++ b/gcloud/credentials.py @@ -95,10 +95,20 @@ def _get_signed_query_params(credentials, expiration, string_to_sign): :type string_to_sign: string :param string_to_sign: The string to be signed by the credentials. + :raises AttributeError: If :meth: sign_blob is unavailable. + :rtype: dict :returns: Query parameters matching the signing credentials with a signed payload. """ + if not hasattr(credentials, 'sign_blob'): + raise AttributeError('you need a private key to sign credentials.' + 'the credentials you are currently using %s ' + 'just contains a token. see https://googlecloud' + 'platform.github.io/gcloud-python/stable/gcloud-' + 'auth.html#setting-up-a-service-account for more ' + 'details.' % type(credentials)) + _, signature_bytes = credentials.sign_blob(string_to_sign) signature = base64.b64encode(signature_bytes) service_account_name = credentials.service_account_email @@ -115,6 +125,8 @@ def _get_expiration_seconds(expiration): :type expiration: int, long, datetime.datetime, datetime.timedelta :param expiration: When the signed URL should expire. + :raises TypeError: When expiration is not an integer. + :rtype: int :returns: a timestamp as an absolute number of seconds. """ diff --git a/gcloud/test_credentials.py b/gcloud/test_credentials.py index e4108cff11c9..f69200cd2ec3 100644 --- a/gcloud/test_credentials.py +++ b/gcloud/test_credentials.py @@ -101,6 +101,18 @@ def test_w_custom_fields(self): generation=generation) +class Test_generate_signed_url_exception(unittest2.TestCase): + def test_with_google_credentials(self): + import time + from gcloud.credentials import generate_signed_url + RESOURCE = '/name/path' + + credentials = _GoogleCredentials() + expiration = int(time.time() + 5) + self.assertRaises(AttributeError, generate_signed_url, credentials, + resource=RESOURCE, expiration=expiration) + + class Test__get_signed_query_params(unittest2.TestCase): def _callFUT(self, credentials, expiration, string_to_sign): @@ -110,8 +122,6 @@ def _callFUT(self, credentials, expiration, string_to_sign): def test_it(self): import base64 - from gcloud._testing import _Monkey - from gcloud import credentials as MUT SIG_BYTES = b'DEADBEEF' ACCOUNT_NAME = object() @@ -226,6 +236,12 @@ def sign_blob(self, bytes_to_sign): return None, self._sign_result +class _GoogleCredentials(object): + + def __init__(self, service_account_email='testing@example.com'): + self.service_account_email = service_account_email + + class _Client(object): def __init__(self): From c1c13be100e1346776f5dd05caee3d16e3efd5dc Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 11 Jul 2016 14:50:10 -0400 Subject: [PATCH 094/103] Add quotes to CSV reference. closes #1970 --- docs/bigquery-usage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/bigquery-usage.rst b/docs/bigquery-usage.rst index edab2e1510d4..28d1fd9eb83b 100644 --- a/docs/bigquery-usage.rst +++ b/docs/bigquery-usage.rst @@ -236,7 +236,7 @@ Upload table data from a file: ... SchemaField('full_name', 'STRING', mode='required'), ... SchemaField('age', 'INTEGER', mode='required)] >>> with open('person_ages.csv', 'rb') as csv_file: - ... table.upload_from_file(csv_file, CSV, + ... table.upload_from_file(csv_file, 'CSV', ... 
create_disposition='CREATE_IF_NEEDED') Get rows from a table's data: From d5dfcef1a46b0df7e13546a9887b372db3469ab0 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 11 Jul 2016 15:59:05 -0400 Subject: [PATCH 095/103] Appveyor: this-time-for-sure-rocky Install GAX-related wrappers via pip to work around #1972. --- appveyor.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index beecafaab769..3fb89ebfd473 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -83,15 +83,16 @@ install: # pip will build them from source using the MSVC compiler matching the # target Python version and architecture - "%CMD_IN_ENV% pip install wheel nose nose-exclude unittest2 cryptography grpcio" + # Install sometimes-problemaatic gRPC-related dependencies + - "%CMD_IN_ENV% pip install grpcio gax-google-pubsub-v1 gax-google-logging-v2" build_script: # Build the compiled extension - #- "%CMD_IN_ENV% python setup.py build" - - "%CMD_IN_ENV% python setup.py develop" - - "%CMD_IN_ENV% pip list" + - "%CMD_IN_ENV% python setup.py build" test_script: - "set PYTHONPATH=%GRPC_PATH%" + - "%CMD_IN_ENV% pip list" # Run the project tests - "%CMD_IN_ENV% python setup.py nosetests" From 844ec530ccfd43c37c97c865df23170c9a708073 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 11 Jul 2016 17:05:06 -0400 Subject: [PATCH 096/103] Drop unused imports flagged by pylint 1.6. --- gcloud/bigquery/test_dataset.py | 2 -- gcloud/bigquery/test_query.py | 2 -- gcloud/bigquery/test_table.py | 1 - gcloud/dns/test_changes.py | 1 - gcloud/storage/test_acl.py | 1 - 5 files changed, 7 deletions(-) diff --git a/gcloud/bigquery/test_dataset.py b/gcloud/bigquery/test_dataset.py index e1caa1f81dff..cfa6c27c17a5 100644 --- a/gcloud/bigquery/test_dataset.py +++ b/gcloud/bigquery/test_dataset.py @@ -636,8 +636,6 @@ def test_delete_w_alternate_client(self): self.assertEqual(req['path'], '/%s' % PATH) def test_list_tables_empty(self): - from gcloud.bigquery.table import Table - conn = _Connection({}) client = _Client(project=self.PROJECT, connection=conn) dataset = self._makeOne(self.DS_NAME, client=client) diff --git a/gcloud/bigquery/test_query.py b/gcloud/bigquery/test_query.py index c0e21541c5a7..4fcb3274c12c 100644 --- a/gcloud/bigquery/test_query.py +++ b/gcloud/bigquery/test_query.py @@ -323,8 +323,6 @@ def __init__(self, *responses): self._requested = [] def api_request(self, **kw): - from gcloud.exceptions import NotFound self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] return response diff --git a/gcloud/bigquery/test_table.py b/gcloud/bigquery/test_table.py index a0edf9242824..7aa6e52de1e5 100644 --- a/gcloud/bigquery/test_table.py +++ b/gcloud/bigquery/test_table.py @@ -834,7 +834,6 @@ def test_update_w_alternate_client(self): import datetime from gcloud._helpers import UTC from gcloud._helpers import _millis - from gcloud.bigquery.table import SchemaField PATH = 'projects/%s/datasets/%s/tables/%s' % ( self.PROJECT, self.DS_NAME, self.TABLE_NAME) diff --git a/gcloud/dns/test_changes.py b/gcloud/dns/test_changes.py index f7902a106bc9..ed278a5b058f 100644 --- a/gcloud/dns/test_changes.py +++ b/gcloud/dns/test_changes.py @@ -56,7 +56,6 @@ def _makeResource(self): def _verifyResourceProperties(self, changes, resource, zone): from gcloud._helpers import _rfc3339_to_datetime - from gcloud._helpers import UTC self.assertEqual(changes.name, resource['id']) started = _rfc3339_to_datetime(resource['startTime']) self.assertEqual(changes.started, 
started) diff --git a/gcloud/storage/test_acl.py b/gcloud/storage/test_acl.py index bb8dbea85404..a3fd2d22c8c6 100644 --- a/gcloud/storage/test_acl.py +++ b/gcloud/storage/test_acl.py @@ -800,7 +800,6 @@ def __init__(self, *responses): self._deleted = [] def api_request(self, **kw): - from gcloud.exceptions import NotFound self._requested.append(kw) response, self._responses = self._responses[0], self._responses[1:] return response From 06800cf7b6edeb79760614e96ed417a1d32fcd7f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 11 Jul 2016 17:06:22 -0400 Subject: [PATCH 097/103] Suppress new pylint 1.6 opinions for *all* files, not just tests. See #1968 for eventual reversal. This change reverts #1957 in favor of a more global suppression. --- scripts/pylintrc_default | 12 ++++++++++++ scripts/run_pylint.py | 2 -- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/pylintrc_default b/scripts/pylintrc_default index e68626855dc3..413ccd72ba41 100644 --- a/scripts/pylintrc_default +++ b/scripts/pylintrc_default @@ -90,6 +90,13 @@ load-plugins=pylint.extensions.check_docs # - no-name-in-module: Error gives a lot of false positives for names which # are defined dynamically. Also, any truly missing names # will be detected by our 100% code coverage. +# +# New opinions in pylint 1.6, enforcing PEP 257. #1968 for eventual fixes +# - catching-non-exception +# - missing-raises-doc +# - missing-returns-doc +# - redundant-returns-doc +# - ungrouped-imports disable = maybe-no-member, no-member, @@ -99,6 +106,11 @@ disable = redefined-variable-type, wrong-import-position, no-name-in-module, + catching-non-exception, + missing-raises-doc, + missing-returns-doc, + redundant-returns-doc, + ungrouped-imports [REPORTS] diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index 48a393153368..7fa7662efb07 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -51,8 +51,6 @@ 'import-error', 'invalid-name', 'missing-docstring', - 'missing-raises-doc', - 'missing-returns-doc', 'no-init', 'no-self-use', 'superfluous-parens', From dec0c8b5517bbafceb1344f4011117a4f8cd0b81 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 11 Jul 2016 17:07:09 -0400 Subject: [PATCH 098/103] Suppress pylint-1.6-choking file. --- scripts/run_pylint.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index 7fa7662efb07..89e236856ff6 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -37,6 +37,7 @@ ] IGNORED_FILES = [ os.path.join('docs', 'conf.py'), + os.path.join('gcloud', 'streaming', 'http_wrapper.py'), 'setup.py', ] SCRIPTS_DIR = os.path.abspath(os.path.dirname(__file__)) From 7d1748286e3f715d877a1d1dba46da967a3a22ec Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 11 Jul 2016 17:32:19 -0400 Subject: [PATCH 099/103] Continue testing all files in a set even if one exits w/ non-zero status. --- scripts/run_pylint.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index 89e236856ff6..a972c2c888f2 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -226,13 +226,17 @@ def lint_fileset(filenames, rcfile, description): if os.path.exists(filename)] if filenames: rc_flag = '--rcfile=%s' % (rcfile,) - pylint_shell_command = ['pylint', rc_flag] + filenames - status_code = subprocess.call(pylint_shell_command) - if status_code != 0: - error_message = ('Pylint failed on %s with ' - 'status %d.' 
% (description, status_code)) - print(error_message, file=sys.stderr) - sys.exit(status_code) + pylint_shell_command = ['pylint', rc_flag] + errors = {} # filename -> status_code + for filename in filenames: + cmd = pylint_shell_command + [filename] + status_code = subprocess.call(cmd) + if status_code != 0: + errors[filename] = status_code + if errors: + for filename, status_code in sorted(errors.items()): + print('%-30s: %d' % (filename, status_code), file=sys.stderr) + sys.exit(len(errors)) else: print('Skipping %s, no files to lint.' % (description,)) From 1285d8c1913dcd27d68763761e0abc2415ccc074 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 11 Jul 2016 17:32:57 -0400 Subject: [PATCH 100/103] Skip checking two files which cause pylint 1.6 to barf. See: https://github.com/PyCQA/pylint/issues/998. --- scripts/run_pylint.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index a972c2c888f2..52449e352828 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -37,6 +37,9 @@ ] IGNORED_FILES = [ os.path.join('docs', 'conf.py'), + # Both these files cause pylint 1.6 to barf. See: + # https://github.com/PyCQA/pylint/issues/998 + os.path.join('gcloud', 'bigtable', 'happybase', 'connection.py'), os.path.join('gcloud', 'streaming', 'http_wrapper.py'), 'setup.py', ] From bbfefee823a1025d65c781c08ec3e89ad4b1cbe4 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 11 Jul 2016 20:57:32 -0400 Subject: [PATCH 101/103] Attempt to fix windows path crash. closes #1975 --- gcloud/_helpers.py | 9 +++++---- gcloud/test__helpers.py | 28 ++++++++++++++++++++-------- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/gcloud/_helpers.py b/gcloud/_helpers.py index 834197a42f55..c6267888be36 100644 --- a/gcloud/_helpers.py +++ b/gcloud/_helpers.py @@ -194,10 +194,11 @@ def _default_service_project_id(): search_paths.append(os.path.expanduser(DEFAULT_CONFIGURATION_PATH)) except ImportError: pass - win32_config_path = os.path.join(os.getenv('APPDATA', ''), - 'gcloud', 'configurations', - 'config_default') - search_paths.append(win32_config_path) + + windows_config_path = os.path.join(os.getenv('APPDATA', ''), + 'gcloud', 'configurations', + 'config_default') + search_paths.append(windows_config_path) config = configparser.RawConfigParser() config.read(search_paths) diff --git a/gcloud/test__helpers.py b/gcloud/test__helpers.py index 3f843ef6f4d5..4af1082cc551 100644 --- a/gcloud/test__helpers.py +++ b/gcloud/test__helpers.py @@ -183,34 +183,41 @@ def test_no_environment(self): class Test__get_default_service_project_id(unittest2.TestCase): config_path = '.config/gcloud/configurations/' config_file = 'config_default' + temp_APPDATA = '' def setUp(self): import tempfile import os self.temp_config_path = tempfile.mkdtemp() + self.temp_APPDATA = os.getenv('APPDATA') + if self.temp_APPDATA: # pragma: NO COVER Windows + os.environ['APPDATA'] = self.temp_config_path + self.config_path = os.path.join(os.getenv('APPDATA', '~/.config'), + 'gcloud', 'configurations') conf_path = os.path.join(self.temp_config_path, self.config_path) os.makedirs(conf_path) - full_config_path = os.path.join(conf_path, self.config_file) + self.temp_config_file = os.path.join(conf_path, self.config_file) - self.temp_config_file = full_config_path - - with open(full_config_path, 'w') as conf_file: + with open(self.temp_config_file, 'w') as conf_file: conf_file.write('[core]\nproject = test-project-id') def tearDown(self): import shutil - - 
shutil.rmtree(self.temp_config_path) + import os + if os.path.exists(self.temp_config_path): + shutil.rmtree(self.temp_config_path) + if self.temp_APPDATA: # pragma: NO COVER Windows + os.environ['APPDATA'] = self.temp_APPDATA def callFUT(self, project_id=None): import os from gcloud._helpers import _default_service_project_id from gcloud._testing import _Monkey - def mock_expanduser(path=''): - if project_id and path.startswith('~'): + def mock_expanduser(path=None): + if project_id and path: __import__('pwd') # Simulate actual expanduser imports. return self.temp_config_file return '' @@ -224,6 +231,9 @@ def test_read_from_cli_info(self): def test_gae_without_expanduser(self): import sys + import shutil + shutil.rmtree(self.temp_config_path) + try: sys.modules['pwd'] = None # Blocks pwd from being imported. project_id = self.callFUT('test-project-id') @@ -232,6 +242,8 @@ def test_gae_without_expanduser(self): del sys.modules['pwd'] # Unblocks importing of pwd. def test_info_value_not_present(self): + import shutil + shutil.rmtree(self.temp_config_path) project_id = self.callFUT() self.assertEqual(None, project_id) From 7ea997c561ce3c68586395a2bbcf902b4ed71641 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Tue, 12 Jul 2016 13:17:24 -0400 Subject: [PATCH 102/103] Update DNS resource_record_set(). closes #1886 --- docs/dns-usage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/dns-usage.rst b/docs/dns-usage.rst index 7ebaf13c167d..42bd1d5d8548 100644 --- a/docs/dns-usage.rst +++ b/docs/dns-usage.rst @@ -140,7 +140,7 @@ bundling additions to or deletions from the set. >>> zone = client.zone('acme-co', 'example.com') >>> TWO_HOURS = 2 * 60 * 60 # seconds >>> record_set = zone.resource_record_set( - ... 'www.example.com', 'CNAME', TWO_HOURS, 'www1.example.com') + ... 'www.example.com.', 'CNAME', TWO_HOURS, ['www1.example.com.',]) >>> changes = zone.changes() >>> changes.add_record_set(record_set) >>> changes.create() # API request From 6ae83853f994720935640ea6c0cf20f140a155e2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 27 May 2016 19:24:40 -0400 Subject: [PATCH 103/103] Convert table usage examples to tested snippets. --- docs/bigquery-usage.rst | 109 +++++--------- docs/bigquery_snippets.py | 297 +++++++++++++++++++++++++++++++++++++- 2 files changed, 324 insertions(+), 82 deletions(-) diff --git a/docs/bigquery-usage.rst b/docs/bigquery-usage.rst index d1689de22f6f..f2d0b4f6d21b 100644 --- a/docs/bigquery-usage.rst +++ b/docs/bigquery-usage.rst @@ -120,107 +120,64 @@ Tables Tables exist within datasets. List tables for the dataset: -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> tables, next_page_token = dataset.list_tables() # API request - >>> [table.name for table in tables] - ['table_name'] +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_list_tables] + :end-before: [END dataset_list_tables] Create a table: -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.create() # API request +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_create] + :end-before: [END table_create] Check for the existence of a table: -.. 
doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.exists() # API request - True +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_exists] + :end-before: [END table_exists] Refresh metadata for a table (to pick up changes made by another client): -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.reload() # API request +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_reload] + :end-before: [END table_reload] Patch specific properties for a table: -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.patch(friendly_name='Person Ages', - ... description='Ages of persons') # API request +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_patch] + :end-before: [END table_patch] Update all writable metadata for a table -.. doctest:: - - >>> from gcloud import bigquery - >>> from gcloud.bigquery import SchemaField - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.schema = [ - ... SchemaField('full_name', 'STRING', mode='required'), - ... SchemaField('age', 'INTEGER', mode='required)] - >>> table.update() # API request +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_update] + :end-before: [END table_update] -Upload table data from a file: +Get rows from a table's data: -.. doctest:: +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_fetch_data] + :end-before: [END table_fetch_data] - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.schema = [ - ... SchemaField('full_name', 'STRING', mode='required'), - ... SchemaField('age', 'INTEGER', mode='required)] - >>> with open('person_ages.csv', 'rb') as csv_file: - ... table.upload_from_file(csv_file, 'CSV', - ... create_disposition='CREATE_IF_NEEDED') +Insert rows into a table's data: -Get rows from a table's data: +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_insert_data] + :end-before: [END table_insert_data] -.. doctest:: +Upload table data from a file: - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> rows, next_page_token = table.fetch_data(max_results=100) # API request - >>> for row in rows: - ... for field, value in zip(table.schema, row): - ... do_something(field, value) +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_upload_from_file] + :end-before: [END table_upload_from_file] Delete a table: -.. doctest:: +.. 
literalinclude:: bigquery_snippets.py + :start-after: [START table_delete] + :end-before: [END table_delete] - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.delete() # API request Jobs ---- diff --git a/docs/bigquery_snippets.py b/docs/bigquery_snippets.py index 923d3fa12b50..1da17826043a 100644 --- a/docs/bigquery_snippets.py +++ b/docs/bigquery_snippets.py @@ -23,10 +23,30 @@ need to be deleted during teardown. """ +import operator import time +from gcloud.bigquery import SchemaField from gcloud.bigquery.client import Client +ORIGINAL_FRIENDLY_NAME = 'Original friendly name' +ORIGINAL_DESCRIPTION = 'Original description' +LOCALLY_CHANGED_FRIENDLY_NAME = 'Locally-changed friendly name' +LOCALLY_CHANGED_DESCRIPTION = 'Locally-changed description' +PATCHED_FRIENDLY_NAME = 'Patched friendly name' +PATCHED_DESCRIPTION = 'Patched description' +UPDATED_FRIENDLY_NAME = 'Updated friendly name' +UPDATED_DESCRIPTION = 'Updated description' + +SCHEMA = [ + SchemaField('full_name', 'STRING', mode='required'), + SchemaField('age', 'INTEGER', mode='required'), +] + +QUERY = ( + 'SELECT name FROM [bigquery-public-data:usa_names.usa_1910_2013] ' + 'WHERE state = "TX"') + def snippet(func): """Mark ``func`` as a snippet example function.""" @@ -38,6 +58,15 @@ def _millis(): return time.time() * 1000 +class _CloseOnDelete(object): + + def __init__(self, wrapped): + self._wrapped = wrapped + + def delete(self): + self._wrapped.close() + + @snippet def client_list_datasets(client, to_delete): # pylint: disable=unused-argument """List datasets for a project.""" @@ -87,8 +116,6 @@ def dataset_exists(client, to_delete): def dataset_reload(client, to_delete): """Reload a dataset's metadata.""" DATASET_NAME = 'dataset_reload_%d' % (_millis(),) - ORIGINAL_DESCRIPTION = 'Original description' - LOCALLY_CHANGED_DESCRIPTION = 'Locally-changed description' dataset = client.dataset(DATASET_NAME) dataset.description = ORIGINAL_DESCRIPTION dataset.create() @@ -107,8 +134,6 @@ def dataset_reload(client, to_delete): def dataset_patch(client, to_delete): """Patch a dataset's metadata.""" DATASET_NAME = 'dataset_patch_%d' % (_millis(),) - ORIGINAL_DESCRIPTION = 'Original description' - PATCHED_DESCRIPTION = 'Patched description' dataset = client.dataset(DATASET_NAME) dataset.description = ORIGINAL_DESCRIPTION dataset.create() @@ -130,8 +155,6 @@ def dataset_patch(client, to_delete): def dataset_update(client, to_delete): """Update a dataset's metadata.""" DATASET_NAME = 'dataset_update_%d' % (_millis(),) - ORIGINAL_DESCRIPTION = 'Original description' - UPDATED_DESCRIPTION = 'Updated description' dataset = client.dataset(DATASET_NAME) dataset.description = ORIGINAL_DESCRIPTION dataset.create() @@ -172,6 +195,268 @@ def dataset_delete(client, to_delete): # pylint: disable=unused-argument # [END dataset_delete] +@snippet +def dataset_list_tables(client, to_delete): + """List tables within a dataset.""" + DATASET_NAME = 'dataset_list_tables_dataset_%d' % (_millis(),) + TABLE_NAME = 'dataset_list_tables_table_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.create() + to_delete.append(dataset) + + # [START dataset_list_tables] + tables, token = dataset.list_tables() # API request + assert len(tables) == 0 + assert token is None + table = dataset.table(TABLE_NAME) + table.view_query = QUERY + table.create() # API request + tables, token = dataset.list_tables() # API request + assert 
len(tables) == 1 + assert tables[0].name == TABLE_NAME + # [END dataset_list_tables] + to_delete.insert(0, table) + + +@snippet +def table_create(client, to_delete): + """Create a table.""" + DATASET_NAME = 'table_create_dataset_%d' % (_millis(),) + TABLE_NAME = 'table_create_table_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.create() + to_delete.append(dataset) + + # [START table_create] + table = dataset.table(TABLE_NAME, SCHEMA) + table.create() # API request + # [END table_create] + + to_delete.insert(0, table) + + +@snippet +def table_exists(client, to_delete): + """Test existence of a table.""" + DATASET_NAME = 'table_exists_dataset_%d' % (_millis(),) + TABLE_NAME = 'table_exists_table_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.create() + to_delete.append(dataset) + + # [START table_exists] + table = dataset.table(TABLE_NAME, SCHEMA) + assert not table.exists() # API request + table.create() # API request + assert table.exists() # API request + # [END table_exists] + + to_delete.insert(0, table) + + +@snippet +def table_reload(client, to_delete): + """Reload a table's metadata.""" + DATASET_NAME = 'table_reload_dataset_%d' % (_millis(),) + TABLE_NAME = 'table_reload_table_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.create() + to_delete.append(dataset) + + table = dataset.table(TABLE_NAME, SCHEMA) + table.friendly_name = ORIGINAL_FRIENDLY_NAME + table.description = ORIGINAL_DESCRIPTION + table.create() + to_delete.insert(0, table) + + # [START table_reload] + assert table.friendly_name == ORIGINAL_FRIENDLY_NAME + assert table.description == ORIGINAL_DESCRIPTION + table.friendly_name = LOCALLY_CHANGED_FRIENDLY_NAME + table.description = LOCALLY_CHANGED_DESCRIPTION + table.reload() # API request + assert table.friendly_name == ORIGINAL_FRIENDLY_NAME + assert table.description == ORIGINAL_DESCRIPTION + # [END table_reload] + + +@snippet +def table_patch(client, to_delete): + """Patch a table's metadata.""" + DATASET_NAME = 'table_patch_dataset_%d' % (_millis(),) + TABLE_NAME = 'table_patch_table_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.description = ORIGINAL_DESCRIPTION + dataset.create() + to_delete.append(dataset) + + table = dataset.table(TABLE_NAME, SCHEMA) + table.friendly_name = ORIGINAL_FRIENDLY_NAME + table.description = ORIGINAL_DESCRIPTION + table.create() + to_delete.insert(0, table) + + # [START table_patch] + assert table.friendly_name == ORIGINAL_FRIENDLY_NAME + assert table.description == ORIGINAL_DESCRIPTION + table.patch( + friendly_name=PATCHED_FRIENDLY_NAME, + description=PATCHED_DESCRIPTION, + ) # API request + assert table.friendly_name == PATCHED_FRIENDLY_NAME + assert table.description == PATCHED_DESCRIPTION + # [END table_patch] + + +@snippet +def table_update(client, to_delete): + """Update a table's metadata.""" + DATASET_NAME = 'table_update_dataset_%d' % (_millis(),) + TABLE_NAME = 'table_update_table_%d' % (_millis(),) + dataset = client.dataset(DATASET_NAME) + dataset.description = ORIGINAL_DESCRIPTION + dataset.create() + to_delete.append(dataset) + + table = dataset.table(TABLE_NAME, SCHEMA) + table.friendly_name = ORIGINAL_FRIENDLY_NAME + table.description = ORIGINAL_DESCRIPTION + table.create() + to_delete.insert(0, table) + + # [START table_update] + assert table.friendly_name == ORIGINAL_FRIENDLY_NAME + assert table.description == ORIGINAL_DESCRIPTION + NEW_SCHEMA = table.schema[:] + NEW_SCHEMA.append(SchemaField('phone', 'string')) + 
+    table.friendly_name = UPDATED_FRIENDLY_NAME
+    table.description = UPDATED_DESCRIPTION
+    table.schema = NEW_SCHEMA
+    table.update()  # API request
+    assert table.friendly_name == UPDATED_FRIENDLY_NAME
+    assert table.description == UPDATED_DESCRIPTION
+    assert table.schema == NEW_SCHEMA
+    # [END table_update]
+
+
+def _warm_up_inserted_table_data(table):
+    # Allow for 90 seconds of "warm up" before rows visible.  See:
+    # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
+    rows = ()
+    counter = 18
+
+    while len(rows) == 0 and counter > 0:
+        counter -= 1
+        rows, _, _ = table.fetch_data()
+        if len(rows) == 0:
+            time.sleep(5)
+
+
+@snippet
+def table_insert_fetch_data(client, to_delete):
+    """Insert / fetch table data."""
+    DATASET_NAME = 'table_insert_fetch_data_dataset_%d' % (_millis(),)
+    TABLE_NAME = 'table_insert_fetch_data_table_%d' % (_millis(),)
+    dataset = client.dataset(DATASET_NAME)
+    dataset.create()
+    to_delete.append(dataset)
+
+    table = dataset.table(TABLE_NAME, SCHEMA)
+    table.create()
+    to_delete.insert(0, table)
+
+    # [START table_insert_data]
+    ROWS_TO_INSERT = [
+        (u'Phred Phlyntstone', 32),
+        (u'Wylma Phlyntstone', 29),
+    ]
+
+    table.insert_data(ROWS_TO_INSERT)
+    # [END table_insert_data]
+
+    _warm_up_inserted_table_data(table)
+
+    found_rows = []
+
+    def do_something(row):
+        found_rows.append(row)
+
+    # [START table_fetch_data]
+    rows, _, token = table.fetch_data()
+    while True:
+        for row in rows:
+            do_something(row)
+        if token is None:
+            break
+        rows, _, token = table.fetch_data(page_token=token)
+    # [END table_fetch_data]
+
+    assert len(found_rows) == len(ROWS_TO_INSERT)
+    by_age = operator.itemgetter(1)
+    found_rows = reversed(sorted(found_rows, key=by_age))
+    for found, to_insert in zip(found_rows, ROWS_TO_INSERT):
+        assert found == to_insert
+
+
+@snippet
+def table_upload_from_file(client, to_delete):
+    """Upload table data from a CSV file."""
+    import csv
+    import tempfile
+    DATASET_NAME = 'table_upload_from_file_dataset_%d' % (_millis(),)
+    TABLE_NAME = 'table_upload_from_file_table_%d' % (_millis(),)
+    dataset = client.dataset(DATASET_NAME)
+    dataset.create()
+    to_delete.append(dataset)
+
+    table = dataset.table(TABLE_NAME, SCHEMA)
+    table.create()
+    to_delete.insert(0, table)
+
+    csv_file = tempfile.NamedTemporaryFile(suffix='.csv')
+    to_delete.append(_CloseOnDelete(csv_file))
+
+    # [START table_upload_from_file]
+    writer = csv.writer(csv_file)
+    writer.writerow((b'full_name', b'age'))
+    writer.writerow((b'Phred Phlyntstone', b'32'))
+    writer.writerow((b'Wylma Phlyntstone', b'29'))
+    csv_file.flush()
+
+    with open(csv_file.name, 'rb') as readable:
+        table.upload_from_file(
+            readable, source_format='CSV', skip_leading_rows=1)
+    # [END table_upload_from_file]
+
+    _warm_up_inserted_table_data(table)
+
+    rows, total, token = table.fetch_data()
+
+    assert len(rows) == total == 2
+    assert token is None
+    assert rows[0] == (u'Phred Phlyntstone', 32)
+    assert rows[1] == (u'Wylma Phlyntstone', 29)
+
+
+@snippet
+def table_delete(client, to_delete):  # pylint: disable=unused-argument
+    """Delete a table."""
+    DATASET_NAME = 'table_delete_dataset_%d' % (_millis(),)
+    TABLE_NAME = 'table_delete_table_%d' % (_millis(),)
+    dataset = client.dataset(DATASET_NAME)
+    dataset.create()
+    to_delete.append(dataset)
+
+    table = dataset.table(TABLE_NAME, SCHEMA)
+    table.create()
+
+    # [START table_delete]
+    assert table.exists()  # API request
+    table.delete()  # API request
+    assert not table.exists()  # API request
+    # [END table_delete]
+
+
 def _find_examples():
     funcs = [obj for obj in globals().values()
              if getattr(obj, '_snippet', False)]