diff --git a/sdk/monitor/azure-monitor-query/CHANGELOG.md b/sdk/monitor/azure-monitor-query/CHANGELOG.md
index 93a955a2004f..cb3fe811522d 100644
--- a/sdk/monitor/azure-monitor-query/CHANGELOG.md
+++ b/sdk/monitor/azure-monitor-query/CHANGELOG.md
@@ -12,7 +12,9 @@
 ### Breaking Changes

 - `LogsQueryResult` now iterates over the tables directly as a convinience.
-- `query` API now returns a union of `LogsQueryPartialResult` and `LogsQueryResult`.
+- `query` API in logs is renamed to `query_workspace`
+- `query` API in metrics is renamed to `query_resource`
+- `query_workspace` API now returns a union of `LogsQueryPartialResult` and `LogsQueryResult`.
 - `query_batch` API now returns a union of `LogsQueryPartialResult`, `LogsQueryError` and `LogsQueryResult`.
 - `metric_namespace` is renamed to `namespace` and is a keyword-only argument in `list_metric_definitions` API.

diff --git a/sdk/monitor/azure-monitor-query/README.md b/sdk/monitor/azure-monitor-query/README.md
index 9729da67de55..800f960ac7eb 100644
--- a/sdk/monitor/azure-monitor-query/README.md
+++ b/sdk/monitor/azure-monitor-query/README.md
@@ -122,7 +122,7 @@ start_time=datetime(2021, 7, 2)
 end_time=datetime.now()

 # returns LogsQueryResult
-response = client.query(
+response = client.query_workspace(
     os.environ['LOG_WORKSPACE_ID'],
     query,
     timespan=(start_time, end_time)
@@ -230,7 +230,7 @@ from azure.identity import DefaultAzureCredential
 credential = DefaultAzureCredential()
 client = LogsQueryClient(credential)

-response = client.query(
+response = client.query_workspace(
     os.environ['LOG_WORKSPACE_ID'],
     "range x from 1 to 10000000000 step 1 | count",
     server_timeout=1,
@@ -250,7 +250,7 @@ The same logs query can be executed across multiple Log Analytics workspaces. In
 For example, the following query executes in three workspaces:

 ```python
-client.query(
+client.query_workspace(
     ,
     query,
     additional_workspaces=['', '']
@@ -282,7 +282,7 @@ client = MetricsQueryClient(credential)
 start_time = datetime(2021, 5, 25)
 duration = timedelta(days=1)
 metrics_uri = os.environ['METRICS_RESOURCE_URI']
-response = client.query(
+response = client.query_resource(
     metrics_uri,
     metric_names=["PublishSuccessCount"],
     timespan=(start_time, duration)
@@ -328,7 +328,7 @@ credential = DefaultAzureCredential()
 client = MetricsQueryClient(credential)

 metrics_uri = os.environ['METRICS_RESOURCE_URI']
-response = client.query(
+response = client.query_resource(
     metrics_uri,
     metric_names=["MatchedEventCount"],
     aggregations=[MetricAggregationType.COUNT]
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
index da0d8e173605..913e9988b518 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
@@ -58,7 +58,7 @@ def __init__(self, credential, **kwargs):
         self._query_op = self._client.query

     @distributed_trace
-    def query(self, workspace_id, query, **kwargs):
+    def query_workspace(self, workspace_id, query, **kwargs):
         # type: (str, str, Any) -> Union[LogsQueryResult, LogsQueryPartialResult]
         """Execute an Analytics query.
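For anyone tracking the rename above, a minimal before/after sketch of a synchronous logs caller. This is hypothetical caller code, not part of the diff; the workspace ID, query, and credential setup are placeholders, and only the method name changes.

```python
import os
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

client = LogsQueryClient(DefaultAzureCredential())

# Old name (before this change):
# response = client.query(os.environ['LOG_WORKSPACE_ID'], "AppRequests | take 5", timespan=timedelta(days=1))

# New name (after this change); the arguments themselves are unchanged:
response = client.query_workspace(
    os.environ['LOG_WORKSPACE_ID'],
    "AppRequests | take 5",
    timespan=timedelta(days=1),
)

# Per the CHANGELOG entry above, LogsQueryResult iterates over its tables directly.
for table in response:
    for row in table.rows:
        print(row)
```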
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
index 0e41b59db673..eec3ef245342 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
@@ -56,7 +56,7 @@ def __init__(self, credential, **kwargs):
         self._definitions_op = self._client.metric_definitions

     @distributed_trace
-    def query(self, resource_uri, metric_names, **kwargs):
+    def query_resource(self, resource_uri, metric_names, **kwargs):
         # type: (str, list, Optional[timedelta], Any) -> MetricsResult
         """Lists the metric values for a resource.

diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
index 40d3c5124904..4698bf1c7ee5 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
@@ -42,7 +42,7 @@ def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
         self._query_op = self._client.query

     @distributed_trace_async
-    async def query(
+    async def query_workspace(
         self,
         workspace_id: str,
         query: str,
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
index c13a6759598c..67d2eb06bfec 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
@@ -48,7 +48,7 @@ def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
         self._definitions_op = self._client.metric_definitions

     @distributed_trace_async
-    async def query(
+    async def query_resource(
         self, resource_uri: str, metric_names: List, **kwargs: Any
     ) -> MetricsResult:
         """Lists the metric values for a resource.
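The metrics client follows the same pattern: `query` becomes `query_resource`, and the async clients in the `aio` package receive the equivalent renames. A hedged sketch of a synchronous metrics caller after this change (resource URI and metric name are placeholders; arguments are unchanged):

```python
import os
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import MetricsQueryClient

client = MetricsQueryClient(DefaultAzureCredential())

# Previously client.query(...); only the method name differs.
response = client.query_resource(
    os.environ['METRICS_RESOURCE_URI'],
    metric_names=["Ingress"],
    timespan=timedelta(hours=2),
)

for metric in response.metrics:
    print(metric.name)
```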
diff --git a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py
index 15aa900e6a26..6e9822bb2cec 100644
--- a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py
@@ -24,7 +24,7 @@ async def logs_query():

     # returns LogsQueryResult
     async with client:
-        response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+        response = await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

     if not response.tables:
         print("No results for the query")
diff --git a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py
index 4b52d4186807..978e60338922 100644
--- a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py
@@ -17,7 +17,7 @@ async def query_metrics():

     metrics_uri = os.environ['METRICS_RESOURCE_URI']
     async with client:
-        response = await client.query(
+        response = await client.query_resource(
             metrics_uri,
             metric_names=["Ingress"],
             timespan=timedelta(hours=2),
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
index 64eba9420502..3b2ea6179d3c 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
@@ -21,7 +21,7 @@

 # returns LogsQueryResult
 try:
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
     if response.status == LogsQueryStatus.PARTIAL:
         # handle error here
         error = response.partial_error
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py
index da3aa5dd0cab..6285562580a2 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py
@@ -17,7 +17,7 @@
 summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""

 # returns LogsQueryResult
-response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(hours=1))
+response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(hours=1))

 if not response.tables:
     print("No results for the query")
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_log_query_multiple_workspaces.py b/sdk/monitor/azure-monitor-query/samples/sample_log_query_multiple_workspaces.py
index 30de289faf34..556127a133da 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_log_query_multiple_workspaces.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_log_query_multiple_workspaces.py
@@ -19,7 +19,7 @@
 end_time = datetime.now(UTC())

 # returns LogsQueryResult
-response = client.query(
+response = client.query_workspace(
     os.environ['LOG_WORKSPACE_ID'],
     query,
     additional_workspaces=[os.environ["SECONDARY_WORKSPACE_ID"]],
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py b/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py
index 29feebc087ce..f08b2dd03316 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py
@@ -17,7 +17,7 @@
 summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""

 # returns LogsQueryResult
-response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
+response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))

 try:
     table = response.tables[0]
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py b/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py
index 4027ddd1b308..3052ddda826f 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py
@@ -17,7 +17,7 @@

 # [START send_metrics_query]
 metrics_uri = os.environ['METRICS_RESOURCE_URI']
-response = client.query(
+response = client.query_resource(
     metrics_uri,
     metric_names=["Ingress"],
     timespan=timedelta(hours=2),
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_server_timeout.py b/sdk/monitor/azure-monitor-query/samples/sample_server_timeout.py
index 0f03fde23d0f..b350adf60cb7 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_server_timeout.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_server_timeout.py
@@ -11,7 +11,7 @@

 client = LogsQueryClient(credential)

-response = client.query(
+response = client.query_workspace(
     os.environ['LOG_WORKSPACE_ID'],
     "range x from 1 to 10000000000 step 1 | count",
     server_timeout=1,
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_exceptions_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_exceptions_async.py
index 99f3b2db2a02..2f6a922c4722 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_exceptions_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_exceptions_async.py
@@ -20,7 +20,7 @@ async def test_logs_single_query_fatal_exception():
     credential = _credential()
     client = LogsQueryClient(credential)
     with pytest.raises(HttpResponseError):
-        await client.query('bad_workspace_id', 'AppRequests', timespan=None)
+        await client.query_workspace('bad_workspace_id', 'AppRequests', timespan=None)

 @pytest.mark.live_test_only
 @pytest.mark.asyncio
@@ -30,7 +30,7 @@ async def test_logs_single_query_partial_exception_not_allowed():
     query = """let Weight = 92233720368547758;
     range x from 1 to 3 step 1
     | summarize percentilesw(x, Weight * 100, 50)"""
-    response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
+    response = await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
     assert response.__class__ == LogsQueryPartialResult
     assert response.partial_error is not None
     assert response.partial_error.code == 'PartialError'
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
index 1894d2de3d84..98fb8d260dec 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
@@ -25,7 +25,7 @@ async def test_logs_auth():
     summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""

     # returns LogsQueryResult
-    response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+    response = await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

     assert response is not None
     assert response.tables is not None
@@ -41,7 +41,7 @@ async def test_logs_auth_no_timespan():

     # returns LogsQueryResult
     with pytest.raises(TypeError):
-        await client.query(os.environ['LOG_WORKSPACE_ID'], query)
+        await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query)


 @pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/19917")
@@ -50,7 +50,7 @@ async def test_logs_auth_no_timespan():
 async def test_logs_server_timeout():
     client = LogsQueryClient(_credential())
     with pytest.raises(HttpResponseError) as e:
-        response = await client.query(
+        response = await client.query_workspace(
             os.environ['LOG_WORKSPACE_ID'],
             "range x from 1 to 10000000000 step 1 | count",
             timespan=None,
@@ -111,7 +111,7 @@ async def test_logs_single_query_additional_workspaces_async():
     query = "union * | where TimeGenerated > ago(100d) | project TenantId | summarize count() by TenantId"

     # returns LogsQueryResult
-    response = await client.query(
+    response = await client.query_workspace(
         os.environ['LOG_WORKSPACE_ID'],
         query,
         timespan=None,
@@ -162,7 +162,7 @@ async def test_logs_single_query_with_render():
     query = """AppRequests"""

     # returns LogsQueryResult
-    response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True)
+    response = await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True)

     assert response.visualization is not None

@@ -174,7 +174,7 @@ async def test_logs_single_query_with_render_and_stats():
     query = """AppRequests"""

     # returns LogsQueryResult
-    response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True, include_statistics=True)
+    response = await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True, include_statistics=True)

     assert response.visualization is not None
     assert response.statistics is not None
@@ -186,7 +186,7 @@ async def test_logs_query_result_iterate_over_tables():

     query = "AppRequests; AppRequests | take 5"

-    response = await client.query(
+    response = await client.query_workspace(
         os.environ['LOG_WORKSPACE_ID'],
         query,
         timespan=None,
@@ -210,7 +210,7 @@ async def test_logs_query_result_row_type():

     query = "AppRequests | take 5"

-    response = await client.query(
+    response = await client.query_workspace(
         os.environ['LOG_WORKSPACE_ID'],
         query,
         timespan=None,
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
index ae9ba2f52b2d..a6336d486bd4 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
@@ -18,7 +18,7 @@ def _credential():
 async def test_metrics_auth():
     credential = _credential()
     client = MetricsQueryClient(credential)
-    response = await client.query(
+    response = await client.query_resource(
         os.environ['METRICS_RESOURCE_URI'],
         metric_names=["MatchedEventCount"],
         timespan=timedelta(days=1),
@@ -32,7 +32,7 @@ async def test_metrics_auth():
 async def test_metrics_granularity():
     credential = _credential()
     client = MetricsQueryClient(credential)
-    response = await client.query(
+    response = await client.query_resource(
         os.environ['METRICS_RESOURCE_URI'],
         metric_names=["MatchedEventCount"],
         timespan=timedelta(days=1),
diff --git a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py
index f91e2c9a3b23..ed0513daa36d 100644
--- a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py
+++ b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py
@@ -46,7 +46,7 @@ def run_sync(self):
         Avoid putting any ancilliary logic (e.g. generating UUIDs), and put this in the
         setup/init instead so that we're only measuring the client API call.
         """
-        self.metrics_client.query(
+        self.metrics_client.query_resource(
             self.metrics_uri,
             self.names,
             aggregations=self.aggregations
@@ -59,7 +59,7 @@ async def run_async(self):
         Avoid putting any ancilliary logic (e.g. generating UUIDs), and put this in the
         setup/init instead so that we're only measuring the client API call.
         """
-        await self.async_metrics_client.query(
+        await self.async_metrics_client.query_resource(
             self.metrics_uri,
             self.names,
             aggregations=self.aggregations
diff --git a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py
index 724c5dec3d95..e1d8effff32e 100644
--- a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py
+++ b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py
@@ -48,7 +48,7 @@ def run_sync(self):
         """
         start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc)
         end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc)
-        self.logs_client.query(
+        self.logs_client.query_workspace(
             self.workspace_id,
             self.query,
             timespan=(start_time, end_time)
@@ -63,7 +63,7 @@ async def run_async(self):
         """
         start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc)
         end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc)
-        await self.async_logs_client.query(
+        await self.async_logs_client.query_workspace(
             self.workspace_id,
             self.query,
             timespan=(start_time, end_time)
diff --git a/sdk/monitor/azure-monitor-query/tests/test_exceptions.py b/sdk/monitor/azure-monitor-query/tests/test_exceptions.py
index df4a351802f2..c6a322a359da 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_exceptions.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_exceptions.py
@@ -18,7 +18,7 @@ def test_logs_single_query_fatal_exception():
     credential = _credential()
     client = LogsQueryClient(credential)
     with pytest.raises(HttpResponseError):
-        client.query('bad_workspace_id', 'AppRequests', timespan=None)
+        client.query_workspace('bad_workspace_id', 'AppRequests', timespan=None)

 @pytest.mark.live_test_only
 def test_logs_single_query_partial_exception():
@@ -27,7 +27,7 @@ def test_logs_single_query_partial_exception():
     query = """let Weight = 92233720368547758;
     range x from 1 to 3 step 1
     | summarize percentilesw(x, Weight * 100, 50)"""
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
     assert response.__class__ == LogsQueryPartialResult
     assert response.partial_error is not None
     assert response.partial_data is not None
diff --git a/sdk/monitor/azure-monitor-query/tests/test_logs_client.py b/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
index 6f3d85170e0e..51ef4e644086 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
@@ -22,7 +22,7 @@ def test_logs_single_query():
     summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""

     # returns LogsQueryResult
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

     assert response is not None
     assert response.tables is not None
@@ -37,7 +37,7 @@ def test_logs_single_query_raises_no_timespan():

     # returns LogsQueryResult
     with pytest.raises(TypeError):
-        client.query(os.environ['LOG_WORKSPACE_ID'], query)
+        client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query)

 @pytest.mark.live_test_only
 def test_logs_single_query_with_non_200():
@@ -47,7 +47,7 @@ def test_logs_single_query_with_non_200():
     where TimeGenerated > ago(12h)"""

     with pytest.raises(HttpResponseError) as e:
-        client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+        client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

     assert "SemanticError" in e.value.message

@@ -58,7 +58,7 @@ def test_logs_single_query_with_partial_success():
     query = """let Weight = 92233720368547758;
     range x from 1 to 3 step 1
     | summarize percentilesw(x, Weight * 100, 50)"""
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

     assert response.partial_error is not None
     assert response.partial_data is not None
@@ -70,7 +70,7 @@ def test_logs_server_timeout():
     client = LogsQueryClient(_credential())

     with pytest.raises(HttpResponseError) as e:
-        response = client.query(
+        response = client.query_workspace(
             os.environ['LOG_WORKSPACE_ID'],
             "range x from 1 to 1000000000000000 step 1 | count",
             timespan=None,
@@ -120,7 +120,7 @@ def test_logs_single_query_with_statistics():
     query = """AppRequests"""

     # returns LogsQueryResult
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_statistics=True)
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_statistics=True)

     assert response.statistics is not None

@@ -131,7 +131,7 @@ def test_logs_single_query_with_render():
     query = """AppRequests"""

     # returns LogsQueryResult
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True)
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True)

     assert response.visualization is not None

@@ -142,7 +142,7 @@ def test_logs_single_query_with_render_and_stats():
     query = """AppRequests"""

     # returns LogsQueryResult
-    response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True, include_statistics=True)
+    response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True, include_statistics=True)

     assert response.visualization is not None
     assert response.statistics is not None
@@ -185,7 +185,7 @@ def test_logs_single_query_additional_workspaces():
     query = "union * | where TimeGenerated > ago(100d) | project TenantId | summarize count() by TenantId"

     # returns LogsQueryResult
-    response = client.query(
+    response = client.query_workspace(
         os.environ['LOG_WORKSPACE_ID'],
         query,
         timespan=None,
@@ -231,7 +231,7 @@ def test_logs_query_result_iterate_over_tables():

     query = "AppRequests; AppRequests | take 5"

-    response = client.query(
+    response = client.query_workspace(
         os.environ['LOG_WORKSPACE_ID'],
         query,
         timespan=None,
@@ -254,7 +254,7 @@ def test_logs_query_result_row_type():

     query = "AppRequests | take 5"

-    response = client.query(
+    response = client.query_workspace(
         os.environ['LOG_WORKSPACE_ID'],
         query,
         timespan=None,
diff --git a/sdk/monitor/azure-monitor-query/tests/test_logs_response.py b/sdk/monitor/azure-monitor-query/tests/test_logs_response.py
index f3632faef97f..06881a6f8aed 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_logs_response.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_logs_response.py
@@ -22,7 +22,7 @@ def test_query_response_types():
     summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId, Success, ItemCount, DurationMs"""

     # returns LogsQueryResult
-    result = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+    result = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
     assert isinstance(result.tables[0].rows[0][0], datetime) # TimeGenerated generated is a datetime
     assert isinstance(result.tables[0].rows[0][1], six.string_types) # _ResourceId generated is a string
     assert isinstance(result.tables[0].rows[0][2], bool) # Success generated is a bool
diff --git a/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py b/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py
index b92ae331933b..d3d52fd8fde5 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py
@@ -30,7 +30,7 @@ def callback(request):
         dic = json.loads(request.http_request.body)
         assert dic.get('timespan') is None
     # returns LogsQueryResult
-    client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
+    client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

 @pytest.mark.live_test_only
 def test_query_start_and_end_time():
@@ -45,7 +45,7 @@ def callback(request):
         dic = json.loads(request.http_request.body)
         assert dic.get('timespan') is not None

-    client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=(start_time, end_time), raw_request_hook=callback)
+    client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=(start_time, end_time), raw_request_hook=callback)

 @pytest.mark.live_test_only
 def test_query_duration_and_start_time():
@@ -61,7 +61,7 @@ def callback(request):
         dic = json.loads(request.http_request.body)
         assert '/PT259200.0S' in dic.get('timespan')

-    client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=(start_time,duration), raw_request_hook=callback)
+    client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=(start_time,duration), raw_request_hook=callback)


 @pytest.mark.live_test_only
@@ -76,7 +76,7 @@ def callback(request):
         dic = json.loads(request.http_request.body)
         assert 'PT259200.0S' in dic.get('timespan')

-    client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=duration, raw_request_hook=callback)
+    client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=duration, raw_request_hook=callback)

 def test_duration_to_iso8601():
     d1 = timedelta(days=1)
diff --git a/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py b/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
index 17f61ccb812b..a4fa7cf726df 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
@@ -16,7 +16,7 @@ def _credential():
 def test_metrics_auth():
     credential = _credential()
     client = MetricsQueryClient(credential)
-    response = client.query(
+    response = client.query_resource(
         os.environ['METRICS_RESOURCE_URI'],
         metric_names=["MatchedEventCount"],
         timespan=timedelta(days=1),
@@ -29,7 +29,7 @@ def test_metrics_auth():
 def test_metrics_granularity():
     credential = _credential()
     client = MetricsQueryClient(credential)
-    response = client.query(
+    response = client.query_resource(
         os.environ['METRICS_RESOURCE_URI'],
         metric_names=["MatchedEventCount"],
         timespan=timedelta(days=1),
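For completeness, a minimal sketch of the async equivalents exercised by the samples and tests above. Method names follow this diff; the environment variables, query, and metric name are placeholders, and this is illustrative caller code rather than part of the change.

```python
import asyncio
import os
from datetime import timedelta

from azure.identity.aio import DefaultAzureCredential
from azure.monitor.query.aio import LogsQueryClient, MetricsQueryClient


async def main():
    credential = DefaultAzureCredential()

    # Logs: query() is now query_workspace() on the async client as well.
    async with LogsQueryClient(credential) as logs_client:
        logs_response = await logs_client.query_workspace(
            os.environ['LOG_WORKSPACE_ID'], "AppRequests | take 5", timespan=None
        )
        print(len(logs_response.tables))

    # Metrics: query() is now query_resource() on the async client as well.
    async with MetricsQueryClient(credential) as metrics_client:
        metrics_response = await metrics_client.query_resource(
            os.environ['METRICS_RESOURCE_URI'],
            metric_names=["MatchedEventCount"],
            timespan=timedelta(days=1),
        )
        print([m.name for m in metrics_response.metrics])

    await credential.close()


asyncio.run(main())
```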