diff --git a/.github/containers/Dockerfile b/.github/containers/Dockerfile
index d761b6f4ab..57d8c234c9 100644
--- a/.github/containers/Dockerfile
+++ b/.github/containers/Dockerfile
@@ -96,7 +96,7 @@ RUN echo 'eval "$(pyenv init -)"' >>$HOME/.bashrc && \
     pyenv update

 # Install Python
-ARG PYTHON_VERSIONS="3.10 3.9 3.8 3.7 3.11 2.7 pypy2.7-7.3.12 pypy3.8-7.3.11"
+ARG PYTHON_VERSIONS="3.11 3.10 3.9 3.8 3.7 3.12 2.7 pypy2.7-7.3.12 pypy3.8-7.3.11"
 COPY --chown=1000:1000 --chmod=+x ./install-python.sh /tmp/install-python.sh
 RUN /tmp/install-python.sh && \
     rm /tmp/install-python.sh
diff --git a/.github/containers/Makefile b/.github/containers/Makefile
index 4c057813d7..97b4e7256c 100644
--- a/.github/containers/Makefile
+++ b/.github/containers/Makefile
@@ -12,37 +12,60 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# Repository root for mounting into container.
-MAKEFILE_DIR:=$(dir $(realpath $(firstword $(MAKEFILE_LIST))))
-REPO_ROOT:=$(realpath $(MAKEFILE_DIR)../../)
+# Override constants
+PLATFORM_OVERRIDE:=
+PYTHON_VERSIONS_OVERRIDE:=
+
+# Computed variables
+IMAGE_NAME:=ghcr.io/newrelic/newrelic-python-agent-ci
+MAKEFILE_DIR:=$(dir $(realpath $(firstword ${MAKEFILE_LIST})))
+REPO_ROOT:=$(realpath ${MAKEFILE_DIR}../../)
+UNAME_P:=$(shell uname -p)
+PLATFORM_AUTOMATIC:=$(if $(findstring arm,${UNAME_P}),linux/arm64,linux/amd64)
+PLATFORM:=$(if ${PLATFORM_OVERRIDE},${PLATFORM_OVERRIDE},${PLATFORM_AUTOMATIC})
+PYTHON_VERSIONS_AUTOMATIC:=3.10 2.7
+PYTHON_VERSIONS:=$(if ${PYTHON_VERSIONS_OVERRIDE},${PYTHON_VERSIONS_OVERRIDE},${PYTHON_VERSIONS_AUTOMATIC})

 .PHONY: default
 default: test

-# Perform a shortened build for testing
 .PHONY: build
 build:
-	@docker build $(MAKEFILE_DIR) \
-		-t ghcr.io/newrelic/newrelic-python-agent-ci:local \
-		--build-arg='PYTHON_VERSIONS=3.10 2.7'
-
-# Ensure python versions are usable
-.PHONY: test
-test: build
-	@docker run --rm ghcr.io/newrelic/python-agent-ci:local /bin/bash -c '\
-		python3.10 --version && \
-		python2.7 --version && \
-		touch tox.ini && tox --version && \
-		echo "Success! Python versions installed."'
+	@docker build ${MAKEFILE_DIR} \
+		--platform=${PLATFORM} \
+		-t ${IMAGE_NAME}:local \
+		--build-arg='PYTHON_VERSIONS=${PYTHON_VERSIONS}'

+# Run the local tag as a container.
 .PHONY: run
-run: build
+run: run.local
+
+# Run a specific tag as a container.
+# Usage: make run.<tag>
+# Defaults to run.local, but can instead be run.latest or any other tag.
+.PHONY: run.%
+run.%:
+# Build image if local was specified, else pull latest
+	@if [[ "$*" = "local" ]]; then cd ${MAKEFILE_DIR} && $(MAKE) build; else docker pull ${IMAGE_NAME}:$*; fi
 	@docker run --rm -it \
-		--mount type=bind,source="$(REPO_ROOT)",target=/home/github/python-agent \
+		--platform=${PLATFORM} \
+		--mount type=bind,source="${REPO_ROOT}",target=/home/github/python-agent \
 		--workdir=/home/github/python-agent \
 		--add-host=host.docker.internal:host-gateway \
 		-e NEW_RELIC_HOST="${NEW_RELIC_HOST}" \
 		-e NEW_RELIC_LICENSE_KEY="${NEW_RELIC_LICENSE_KEY}" \
 		-e NEW_RELIC_DEVELOPER_MODE="${NEW_RELIC_DEVELOPER_MODE}" \
 		-e GITHUB_ACTIONS="true" \
-		ghcr.io/newrelic/newrelic-python-agent-ci:local /bin/bash
+		${IMAGE_NAME}:$* /bin/bash
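For clarity, the platform selection above resolves to linux/arm64 only when `uname -p` reports an arm processor, and a non-empty PLATFORM_OVERRIDE (including a command-line assignment such as `make run.latest PLATFORM_OVERRIDE=linux/amd64`) always wins. A minimal Python sketch of the same decision, using `platform.machine()` as a stand-in for `uname -p` (the `docker_platform` helper is illustrative, not part of this patch):

    # Illustrative sketch mirroring PLATFORM_AUTOMATIC / PLATFORM in the Makefile.
    import platform

    def docker_platform(override=""):
        # An explicit override always wins, like $(if ${PLATFORM_OVERRIDE},...).
        if override:
            return override
        machine = platform.machine().lower()
        # $(findstring arm,${UNAME_P}) maps any arm processor to linux/arm64.
        return "linux/arm64" if "arm" in machine or "aarch64" in machine else "linux/amd64"

    print(docker_platform())               # e.g. "linux/arm64" on Apple Silicon
    print(docker_platform("linux/amd64"))  # override wins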
+
+# Ensure python versions are usable. Note: this check cannot automatically adapt to PYTHON_VERSIONS_OVERRIDE.
+.PHONY: test
+test: build
+	@docker run --rm \
+		--platform=${PLATFORM} \
+		${IMAGE_NAME}:local \
+		/bin/bash -c '\
+			python3.10 --version && \
+			python2.7 --version && \
+			touch tox.ini && tox --version && \
+			echo "Success! Python versions installed."'
diff --git a/.github/workflows/build-ci-image.yml b/.github/workflows/build-ci-image.yml
index 8bd904661a..9d60cea8ee 100644
--- a/.github/workflows/build-ci-image.yml
+++ b/.github/workflows/build-ci-image.yml
@@ -63,6 +63,6 @@ jobs:
       with:
         push: ${{ github.event_name != 'pull_request' }}
         context: .github/containers
-        platforms: ${{ (github.ref == 'refs/head/main') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
+        platforms: ${{ (format('refs/heads/{0}', github.event.repository.default_branch) == github.ref) && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
        tags: ${{ steps.meta.outputs.tags }}
        labels: ${{ steps.meta.outputs.labels }}
diff --git a/newrelic/hooks/datastore_aioredis.py b/newrelic/hooks/datastore_aioredis.py
index 03c0f0900a..e27f8d7a99 100644
--- a/newrelic/hooks/datastore_aioredis.py
+++ b/newrelic/hooks/datastore_aioredis.py
@@ -22,6 +22,8 @@
     _redis_operation_re,
 )

+AIOREDIS_VERSION = get_package_version_tuple("aioredis")
+

 def _conn_attrs_to_dict(connection):
     host = getattr(connection, "host", None)
@@ -58,14 +60,13 @@ def _nr_wrapper_AioRedis_method_(wrapped, instance, args, kwargs):
     # Check for transaction and return early if found.
     # Method will return synchronously without executing,
     # it will be added to the command stack and run later.
-    aioredis_version = get_package_version_tuple("aioredis")

     # This conditional is for versions of aioredis that are outside
     # New Relic's supportability window but will still work. New
     # Relic does not provide testing/support for this. In order to
     # keep functionality without affecting coverage metrics, this
     # segment is excluded from coverage analysis.
-    if aioredis_version and aioredis_version < (2,):  # pragma: no cover
+    if AIOREDIS_VERSION and AIOREDIS_VERSION < (2,):  # pragma: no cover
         # AioRedis v1 uses a RedisBuffer instead of a real connection for queueing up pipeline commands
         from aioredis.commands.transaction import _RedisBuffer

@@ -75,7 +76,7 @@ def _nr_wrapper_AioRedis_method_(wrapped, instance, args, kwargs):
             return wrapped(*args, **kwargs)
     else:
         # AioRedis v2 uses a Pipeline object for a client and internally queues up pipeline commands
-        if aioredis_version:
+        if AIOREDIS_VERSION:
             from aioredis.client import Pipeline
             if isinstance(instance, Pipeline):
                 return wrapped(*args, **kwargs)
@@ -139,6 +140,7 @@ async def wrap_Connection_send_command(wrapped, instance, args, kwargs):
     ):
         return await wrapped(*args, **kwargs)

+
 # This wrapper is for versions of aioredis that are outside
 # New Relic's supportability window but will still work. New
 # Relic does not provide testing/support for this. In order to
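The hook change above hoists the version lookup out of the wrapper: `get_package_version_tuple("aioredis")` now runs once at import time, and each call compares against the cached AIOREDIS_VERSION tuple instead of re-resolving the package version. A condensed sketch of the pattern; only the `get_package_version_tuple` call is from this patch, the `dispatch` helper is hypothetical:

    # Sketch of import-time version gating, assuming the agent package is installed.
    from newrelic.common.package_version_utils import get_package_version_tuple

    # Resolved once at import time; None if the package is not installed.
    AIOREDIS_VERSION = get_package_version_tuple("aioredis")

    def dispatch(v2_path, v1_path):
        # Tuple comparison: (1, 3, 0) < (2,) is True, while (2, 0, 1) < (2,) is False.
        if AIOREDIS_VERSION and AIOREDIS_VERSION < (2,):
            return v1_path
        return v2_path

    handler = dispatch(v2_path=lambda: "Pipeline check", v1_path=lambda: "_RedisBuffer check")
    print(handler())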
diff --git a/tests/datastore_redis/test_custom_conn_pool.py b/tests/datastore_redis/test_custom_conn_pool.py
index 8e4503b75d..b16a77f48d 100644
--- a/tests/datastore_redis/test_custom_conn_pool.py
+++ b/tests/datastore_redis/test_custom_conn_pool.py
@@ -12,22 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-''' The purpose of these tests is to confirm that using a non-standard
+""" The purpose of these tests is to confirm that using a non-standard
 connection pool that does not have a `connection_kwargs` attribute will
 not result in an error.
-'''
+"""

 import pytest
 import redis
-
-from newrelic.api.background_task import background_task
-from newrelic.common.package_version_utils import get_package_version_tuple
-
-from testing_support.fixtures import override_application_settings
-from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics
 from testing_support.db_settings import redis_settings
+from testing_support.fixtures import override_application_settings
 from testing_support.util import instance_hostname
+from testing_support.validators.validate_transaction_metrics import (
+    validate_transaction_metrics,
+)
+
+from newrelic.api.background_task import background_task
+from newrelic.common.package_version_utils import get_package_version_tuple

 DB_SETTINGS = redis_settings()[0]
 REDIS_PY_VERSION = get_package_version_tuple("redis")
@@ -45,13 +45,17 @@ def get_connection(self, name, *keys, **options):

     def release(self, connection):
         self.connection.disconnect()

+    def disconnect(self):
+        self.connection.disconnect()
+
+
 # Settings
 _enable_instance_settings = {
-    'datastore_tracer.instance_reporting.enabled': True,
+    "datastore_tracer.instance_reporting.enabled": True,
 }
 _disable_instance_settings = {
-    'datastore_tracer.instance_reporting.enabled': False,
+    "datastore_tracer.instance_reporting.enabled": False,
 }

 # Metrics
@@ -61,98 +65,100 @@ def release(self, connection):
 datastore_all_metric_count = 5 if REDIS_PY_VERSION >= (5, 0) else 3

 _base_scoped_metrics = [
-    ('Datastore/operation/Redis/get', 1),
-    ('Datastore/operation/Redis/set', 1),
-    ('Datastore/operation/Redis/client_list', 1),
+    ("Datastore/operation/Redis/get", 1),
+    ("Datastore/operation/Redis/set", 1),
+    ("Datastore/operation/Redis/client_list", 1),
 ]
 # client_setinfo was introduced in v5.0.0 and assigns info displayed in client_list output
 if REDIS_PY_VERSION >= (5, 0):
-    _base_scoped_metrics.append(('Datastore/operation/Redis/client_setinfo', 2),)
+    _base_scoped_metrics.append(
+        ("Datastore/operation/Redis/client_setinfo", 2),
+    )

 _base_rollup_metrics = [
-    ('Datastore/all', datastore_all_metric_count),
-    ('Datastore/allOther', datastore_all_metric_count),
-    ('Datastore/Redis/all', datastore_all_metric_count),
-    ('Datastore/Redis/allOther', datastore_all_metric_count),
-    ('Datastore/operation/Redis/get', 1),
-    ('Datastore/operation/Redis/set', 1),
-    ('Datastore/operation/Redis/client_list', 1),
+    ("Datastore/all", datastore_all_metric_count),
+    ("Datastore/allOther", datastore_all_metric_count),
+    ("Datastore/Redis/all", datastore_all_metric_count),
+    ("Datastore/Redis/allOther", datastore_all_metric_count),
+    ("Datastore/operation/Redis/get", 1),
+    ("Datastore/operation/Redis/set", 1),
+    ("Datastore/operation/Redis/client_list", 1),
 ]
 if REDIS_PY_VERSION >= (5, 0):
-    _base_rollup_metrics.append(('Datastore/operation/Redis/client_setinfo', 2),)
+    _base_rollup_metrics.append(
+        ("Datastore/operation/Redis/client_setinfo", 2),
+    )

-_host = instance_hostname(DB_SETTINGS['host'])
-_port = DB_SETTINGS['port']
+_host = instance_hostname(DB_SETTINGS["host"])
+_port = DB_SETTINGS["port"]

-_instance_metric_name = 'Datastore/instance/Redis/%s/%s' % (_host, _port)
+_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port)

 instance_metric_count = 5 if REDIS_PY_VERSION >= (5, 0) else 3

-_enable_rollup_metrics = _base_rollup_metrics.append(
-    (_instance_metric_name, instance_metric_count)
-)
+# list.append returns None, so build these with concatenation to keep both
+# variables as real metric lists rather than None.
+_enable_rollup_metrics = _base_rollup_metrics + [(_instance_metric_name, instance_metric_count)]

-_disable_rollup_metrics = _base_rollup_metrics.append(
-    (_instance_metric_name, None)
-)
+_disable_rollup_metrics = _base_rollup_metrics + [(_instance_metric_name, None)]

 # Operations

+
 def exercise_redis(client):
-    client.set('key', 'value')
-    client.get('key')
-    client.execute_command('CLIENT', 'LIST', parse='LIST')
+    client.set("key", "value")
+    client.get("key")
+    client.execute_command("CLIENT", "LIST", parse="LIST")
+

 # Tests

-@pytest.mark.skipif(REDIS_PY_VERSION < (2, 7),
-    reason='Client list command introduced in 2.7')
+
+@pytest.mark.skipif(REDIS_PY_VERSION < (2, 7), reason="Client list command introduced in 2.7")
 @override_application_settings(_enable_instance_settings)
 @validate_transaction_metrics(
-    'test_custom_conn_pool:test_fake_conn_pool_enable_instance',
-    scoped_metrics=_base_scoped_metrics,
-    rollup_metrics=_enable_rollup_metrics,
-    background_task=True)
+    "test_custom_conn_pool:test_fake_conn_pool_enable_instance",
+    scoped_metrics=_base_scoped_metrics,
+    rollup_metrics=_enable_rollup_metrics,
+    background_task=True,
+)
 @background_task()
 def test_fake_conn_pool_enable_instance():
-    client = redis.StrictRedis(host=DB_SETTINGS['host'],
-        port=DB_SETTINGS['port'], db=0)
+    client = redis.StrictRedis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0)

     # Get a real connection
-    conn = client.connection_pool.get_connection('GET')
+    conn = client.connection_pool.get_connection("GET")

     # Replace the original connection pool with one that doesn't
     # have the `connection_kwargs` attribute.
     fake_pool = FakeConnectionPool(conn)
     client.connection_pool = fake_pool
-    assert not hasattr(client.connection_pool, 'connection_kwargs')
+    assert not hasattr(client.connection_pool, "connection_kwargs")

     exercise_redis(client)

-@pytest.mark.skipif(REDIS_PY_VERSION < (2, 7),
-    reason='Client list command introduced in 2.7')
+
+@pytest.mark.skipif(REDIS_PY_VERSION < (2, 7), reason="Client list command introduced in 2.7")
 @override_application_settings(_disable_instance_settings)
 @validate_transaction_metrics(
-    'test_custom_conn_pool:test_fake_conn_pool_disable_instance',
-    scoped_metrics=_base_scoped_metrics,
-    rollup_metrics=_disable_rollup_metrics,
-    background_task=True)
+    "test_custom_conn_pool:test_fake_conn_pool_disable_instance",
+    scoped_metrics=_base_scoped_metrics,
+    rollup_metrics=_disable_rollup_metrics,
+    background_task=True,
+)
 @background_task()
 def test_fake_conn_pool_disable_instance():
-    client = redis.StrictRedis(host=DB_SETTINGS['host'],
-        port=DB_SETTINGS['port'], db=0)
+    client = redis.StrictRedis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0)

     # Get a real connection
-    conn = client.connection_pool.get_connection('GET')
+    conn = client.connection_pool.get_connection("GET")

     # Replace the original connection pool with one that doesn't
     # have the `connection_kwargs` attribute.
fake_pool = FakeConnectionPool(conn) client.connection_pool = fake_pool - assert not hasattr(client.connection_pool, 'connection_kwargs') + assert not hasattr(client.connection_pool, "connection_kwargs") exercise_redis(client) diff --git a/tests/datastore_redis/test_uninstrumented_methods.py b/tests/datastore_redis/test_uninstrumented_methods.py index ccf5a096df..d86f4de955 100644 --- a/tests/datastore_redis/test_uninstrumented_methods.py +++ b/tests/datastore_redis/test_uninstrumented_methods.py @@ -39,6 +39,7 @@ "append_no_scale", "append_values_and_weights", "append_weights", + "auto_close_connection_pool", "batch_indexer", "BatchIndexer", "bulk", @@ -55,6 +56,7 @@ "edges", "execute_command", "flush", + "from_pool", "from_url", "get_connection_kwargs", "get_encoder", diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index b228cfbe48..900c43a812 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -12,12 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +import pprint + import pytest -from openai.util import convert_to_openai_object +from testing_support.fixture.event_loop import ( # noqa: F401; pylint: disable=W0611 + event_loop as loop, +) from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 collector_agent_registration_fixture, collector_available_fixture, ) +from testing_support.mock_external_openai_server import ( + MockExternalOpenAIServer, + extract_shortened_prompt, +) + +from newrelic.common.object_wrapper import wrap_function_wrapper _default_settings = { "transaction_tracer.explain_threshold": 0.0, @@ -33,49 +44,73 @@ linked_applications=["Python Agent Test (mlmodel_openai)"], ) +OPENAI_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "openai_audit.log") +OPENAI_AUDIT_LOG_CONTENTS = {} + + +@pytest.fixture(autouse=True, scope="session") +def openai_server(): + """ + This fixture will either create a mocked backend for testing purposes, or will + set up an audit log file to log responses of the real OpenAI backend to a file. + The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES=1 as + an environment variable to run using the real OpenAI backend. 
(Default: mocking) + """ + import openai + + from newrelic.core.config import _environ_as_bool + + if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False): + # Use mocked OpenAI backend and prerecorded responses + with MockExternalOpenAIServer() as server: + openai.api_base = "http://localhost:%d" % server.port + openai.api_key = "NOT-A-REAL-SECRET" + yield + else: + # Use real OpenAI backend and record responses + openai.api_key = os.environ.get("OPENAI_API_KEY", "") + if not openai.api_key: + raise RuntimeError("OPENAI_API_KEY environment variable required.") + + # Apply function wrappers to record data + wrap_function_wrapper("openai.api_requestor", "APIRequestor.request", wrap_openai_api_requestor_request) + yield # Run tests + + # Write responses to audit log + with open(OPENAI_AUDIT_LOG_FILE, "w") as audit_log_fp: + pprint.pprint(OPENAI_AUDIT_LOG_CONTENTS, stream=audit_log_fp) + + +# Intercept outgoing requests and log to file for mocking +RECORDED_HEADERS = set(["x-request-id", "content-type"]) + + +def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs): + params = bind_request_params(*args, **kwargs) + if not params: + return wrapped(*args, **kwargs) + + prompt = extract_shortened_prompt(params) + + # Send request + result = wrapped(*args, **kwargs) + + # Clean up data + data = result[0].data + headers = result[0]._headers + headers = dict( + filter( + lambda k: k[0].lower() in RECORDED_HEADERS + or k[0].lower().startswith("openai") + or k[0].lower().startswith("x-ratelimit"), + headers.items(), + ) + ) + + # Log response + OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log + return result + -@pytest.fixture(autouse=True) -def openai_chat_completion_dict(): - return { - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": {"content": "212 degrees Fahrenheit is 100 degrees Celsius.", "role": "assistant"}, - } - ], - "created": 1676917710, - "id": "some-test-id-123456789", - "model": "gpt-3.5-turbo-0613", - "object": "chat.completion", - "usage": {"completion_tokens": 7, "prompt_tokens": 3, "total_tokens": 10}, - } - - -@pytest.fixture(autouse=True) -def openai_embedding_dict(): - return { - "data": [ - { - "embedding": [ - -0.006929283495992422, - -0.005336422007530928, - ], - "index": 0, - "object": "embedding", - } - ], - "model": "text-embedding-ada-002", - "object": "list", - "usage": {"prompt_tokens": 5, "total_tokens": 5}, - } - - -@pytest.fixture(autouse=True) -def openai_chat_completion_object(openai_chat_completion_dict): - return convert_to_openai_object(openai_chat_completion_dict) - - -@pytest.fixture(autouse=True) -def openai_embedding_object(openai_embedding_dict): - return convert_to_openai_object(openai_embedding_dict) +def bind_request_params(method, url, params=None, *args, **kwargs): + return params diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py index b0b19b5098..b428f329f2 100644 --- a/tests/mlmodel_openai/test_chat_completion.py +++ b/tests/mlmodel_openai/test_chat_completion.py @@ -13,36 +13,26 @@ # limitations under the License. 
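As context for the rewritten tests below: the new conftest either replays canned responses through MockExternalOpenAIServer or, when NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES=1 is set, records live responses into openai_audit.log keyed by a shortened prompt. A standalone sketch of its header-filtering step; the `sample_headers` values are invented for illustration:

    # Keep only headers that are useful for replaying responses later.
    RECORDED_HEADERS = {"x-request-id", "content-type"}

    def filter_headers(headers):
        return {
            k: v
            for k, v in headers.items()
            if k.lower() in RECORDED_HEADERS
            or k.lower().startswith("openai")
            or k.lower().startswith("x-ratelimit")
        }

    sample_headers = {
        "Content-Type": "application/json",
        "openai-processing-ms": "54",
        "x-ratelimit-remaining-tokens": "149994",
        "Set-Cookie": "session=abc",  # dropped: not in the allow-list
    }
    print(filter_headers(sample_headers))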
import openai -import pytest +_test_openai_chat_completion_sync_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is the boiling point of water?"}, + {"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) -@pytest.fixture -def run_openai_chat_completion_sync(): + +def test_openai_chat_completion_sync(): openai.ChatCompletion.create( model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": "You are a scientist."}, - {"role": "user", "content": "What is the boiling point of water?"}, - {"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, - {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, - ], + messages=_test_openai_chat_completion_sync_messages, ) -@pytest.fixture -def run_openai_chat_completion_async(): - openai.ChatCompletion.acreate( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": "You are a scientist."}, - {"role": "user", "content": "What is the boiling point of water?"}, - { - "role": "assistant", - "content": "The boiling point of water is 212 degrees Fahrenheit or 100 degrees Celsius.", - }, - ], +def test_openai_chat_completion_async(loop): + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_sync_messages, + ) ) - - -def test_no_harm(): - pass diff --git a/tests/mlmodel_openai/test_embeddings.py b/tests/mlmodel_openai/test_embeddings.py index e4265a4105..d5d2f996cb 100644 --- a/tests/mlmodel_openai/test_embeddings.py +++ b/tests/mlmodel_openai/test_embeddings.py @@ -13,18 +13,13 @@ # limitations under the License. import openai -import pytest -@pytest.fixture -def run_openai_embedding_sync(): - embedding = openai.Embedding.create(input="This is a test.", model="text-embedding-ada-002") +def test_openai_embedding_sync(): + openai.Embedding.create(input="This is an embedding test.", model="text-embedding-ada-002") -@pytest.fixture -def run_openai_embedding_async(): - embedding = openai.Embedding.acreate(input="This is a test.", model="text-embedding-ada-002") - - -def test_no_harm(): - pass +def test_openai_embedding_async(loop): + loop.run_until_complete( + openai.Embedding.acreate(input="This is an embedding test.", model="text-embedding-ada-002") + ) diff --git a/tests/testing_support/mock_external_openai_server.py b/tests/testing_support/mock_external_openai_server.py new file mode 100644 index 0000000000..438e4072d8 --- /dev/null +++ b/tests/testing_support/mock_external_openai_server.py @@ -0,0 +1,151 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from testing_support.mock_external_http_server import MockExternalHTTPServer + +# This defines an external server test apps can make requests to instead of +# the real OpenAI backend. 
This provides 3 features: +# +# 1) This removes dependencies on external websites. +# 2) Provides a better mechanism for making an external call in a test app than +# simple calling another endpoint the test app makes available because this +# server will not be instrumented meaning we don't have to sort through +# transactions to separate the ones created in the test app and the ones +# created by an external call. +# 3) This app runs on a separate thread meaning it won't block the test app. + +RESPONSES = { + "This is an embedding test.": ( + { + "Content-Type": "application/json", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "54", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "150000", + "x-ratelimit-remaining-requests": "197", + "x-ratelimit-remaining-tokens": "149994", + "x-ratelimit-reset-requests": "19m45.394s", + "x-ratelimit-reset-tokens": "2ms", + "x-request-id": "c70828b2293314366a76a2b1dcb20688", + }, + { + "data": [ + { + "embedding": "SLewvFF6iztXKj07UOCQO41IorspWOk79KHuu12FrbwjqLe8FCTnvBKqj7sz6bM8qqUEvFSfITpPrJu7uOSbPM8agzyYYqM7YJl/PBF2mryNN967uRiRO9lGcbszcuq7RZIavAnnNLwWA5s8mnb1vG+UGTyqpYS846PGO2M1X7wIxAO8HfgFvc8s8LuQXPQ5qgsKPOinEL15ndY8/MrOu1LRMTxCbQS7PEYJOyMx7rwDJj+79dVjO5P4UzmoPZq8jUgivL36UjzA/Lc8Jt6Ru4bKAL1jRiM70i5VO4neUjwneAy7mlNEPBVpoDuayo28TO2KvAmBrzzwvyy8B3/KO0ZgCry3sKa6QTmPO0a1Szz46Iw87AAcPF0O5DyJVZw8Ac+Yu1y3Pbqzesw8DUDAuq8hQbyALLy7TngmPL6lETxXxLc6TzXSvKJrYLy309c8OHa0OU3NZ7vru2K8mIXUPCxrErxLU5C5s/EVPI+wjLp7BcE74TvcO+2aFrx4A9w80j+Zu/aAojwmzU08k/hTvBpL4rvHFFQ76YftutrxL7wyxgK9BsIevLkYkTq4B028OZnlPPkcgjxhzfS79oCiuB34BbwITTq97nrzOugwRzwGS1U7CqTgvFxROLx4aWG7E/DxPA3J9jwd+AU8dVWPvGlc2jzwWae57nrzu569E72GU7e8Vn9+vFLA7TtVbZE8eOCqPG+3Sjxr5/W8s+DRPE+sm7wFKKQ8A8A5vUSBVryeIxk8hsqAPAeQjryeIxm8gU/tuxVpoDxVXM250GDlOlEDwjs0t6O8Tt6rOVrGHLvmyFy6dhI7PLPxlbv3YP88B/YTPEZgCrxqKsq8Xh+ou96wQLp5rpo8LSg+vL63/rsFjqk8E/DxPEi3MDzTcw66PjcqPNgSfLwqnaK85QuxPI7iHL2+pRE8Z+ICOxzEELvph+07jHqyu2ltnrwNQMC82BL8vAOdiDwSqo88CLM/PCKFBrzmP6a85Nc7PBaM0bvh1VY7NB2pvMkF9Tx3New87mgGPAoKZjo+nS+/Rk/GucqwMz3fwYS8yrCzPMo56jyDHV08XLe9vB4+aLwXwMY8dVUPvCFATbx2eMC8V7NzvEnrpTsIxIO7yVmNu2lc2ryGQnM8A6/1PH/VFbySO6g80i5VPOY/prv6cyi7W5QMPJVP+jsyLIi84H6wPKM50DrZNIS8UEaWPPrIaTzvrmg8rcoaPRuQm7ysH9y8OxIUO7ss4zq3Od08paG6vAPAuTjYAI88/qmCuuROhbzBMK08R4M7u67+j7uClKa6/KedOsqNArzysM08QJ8UvMD8t7v5P7M799fIvAWx2jxiEi48ja6nPL0LFzxFkpq7LAWNPA1AQLyWlLO6qrfxvOGypTxJUau8aJ8uPceLnTtS0TG9omtgPO7xPDvzbfm7FfJWu2CqwzwAASk96FN4PLPgUbwRdhq8Vn9+PLk7wjs8NUW84yx9vHJCZjzysM079hodO/NbDL2BxrY6CE26OzpEpDv7DaM8y0quO41IIr1+Kte8QdMJvKlxDzy9+lI8hfyQPA3J9jzWmKS7z6O5u4a5vLtXKj088XzYO1fEtzwY4/e7Js1NugbCnjymxOu7906SvPSPAb1ieDO8dnjAu/EW0zp/b5C8mGIjvWTPWTwIxIM8YgFqPKvrZrwKpOA7/jK5O2vViDyfaXs8DR2Pu0AFGrvTc446IIOhvDreHrxRnTw8ROdbu55Gyrsht5Y8tVmAvHK5rzzZvTo8bx1QPMglmLvigBU8oIuDvAFYz7pblIw8OZnlOsTvPbxhzfS8BxnFOpkwE72E60w7cNp7utp6ZrtvHdC4uwmyO5dRX7sAm6M7kqEtvElRK7yWg++7JHanvM6ACDvrZqG8Xh+oupQsyTwkZWO8VzuBu5xVKbzEZoc7wB9pvA796zyZlpi8YbsHvQs+W7u9cZy8gKMFOxYDGzyu7Uu71KeDPJxVqbxwyI68VpDCu9VT67xKqFG7KWmtuvNteTocs0w7aJ8uPMUSbzz6cyg8MiwIPEtlfTo+wOA75tkgu7VZgDw8WPa8mGIjPKq38bsr0Zc7Ot4evNNiyju9C5c7YCENPP6pAj3uV8I7X3bOusfxIjvpZLy655bMvL9ivbxO3iu8NKbfPNe7VTz9ZMk88RZTu5QsybxeQtk7qpTAOzGSjTxSwO27mGIjPO7OC7x7FoW8wJayvI2uJzttxqk84H4wOUtlfbxblAw8uTtCPIO3Vzxkz9k8ENwfvfQYuLvHFNQ8LvatPF65ojzPLHA8+RyCvK3Kmjx27wk8Dcn2PARatDv3tBc8hkLzPEOz5jyQSoe8gU/tPMRmhzzp2wU90shPPBv2oLsNQMA8jTdevIftMTt/Xsw7MMQdPICjBT012tS7SLewvJBtuDuevZM8LyojPa6HxjtOAd07v9mGusZXqDoPqKo8qdeUvETnW7y5occ5pOSOvPPkwjsDN4O8Mk85vKnXlDtp06O7kZDpO6GuNDtRF
AY9lAkYPGHNdDx2Afc7RRtROy5/5LyUoxI9mu0+u/dOEryrYrC867vivJp29TtVbZG8SVGrO0im7LnhsqU80frfPL/IwryBT+07/+/kPLZ8sTwoNbg7ZkiIOxadlbxlnUm68RbTuxkX7Tu/cwG7aqGTPO8CAbzTYsq6AIpfvA50tbzllOc7s3rMO0SBVjzXzJm8eZ3Wu4vgtzwPDrA8W6b5uwJpEzwLtaQ81pgkPJuqarxmro288369u48WkjwREBU9JP/dPJ69kzvw4t27h3bouxhrBbwrNx29F9EKPFmSJ7v8px08Tt6rvEJthLxon648UYz4u61TUTz4lPQ7ERAVuhwqFrzfSjs8RRtRO6lxD7zHelm87lfCu10O5LrXMh886YftvL9iPTxCf/E6MZKNOmAhDb2diZ47eRSgPBfRCrznlsw5MiwIvHW7FD3tI807uG3SPE7eqzx1VY864TtcO3zTMDw7EhS8c+0kPLr47TvUDQm8domEvEi3MLruaAa7tUi8u4FgsTwbkBu6pQfAvEJthLwDnQg8S1OQO55GSrxZLCK8nkZKvFXTFr01dM+8W6Z5vO+u6Luh0eW8rofGvFsdw7x7KHK8sN5svCFAzbo/0SS8f9UVu7Qli7wr0Re95E4FvSg1ODok/907AAGpPHQhGrwtS++71pgkvCtazjsSzcC7exYFPLVZgLzZmom7W6Z5PHr0fLtn9O86oUivukvcRrzjPcE8a8REPAei+zoBNZ685aUrPNBg5bqeIxk8FJuwPPdOkrtUOZy8GRftO4KD4rz/72Q7ERCVu8WJODy5O8I5L7NZuxJECjxFkpq8Uq4AOy2fh7wY9Du8GRdtu48o/7mHdug803MOvCUQIrw2hZM8v+tzvE54pruyI6a6exYFvDXrGDwNQEA8zyxwO7c53TwUJGe8Wk9Tu6ouu7yqCwo8vi7IvNe71TxB04m8domEvKTkDrzsidK8+nOovLfT1zr11eM7SVErO3EOcbzqMqw74Tvcut4WRrz5pbi8oznQvMi/Er0aS+I87lfCvK+qdztd6zI83eJQPFy3vbyACQu9/8wzO/k/s7weG7e8906SPA3J9jw8NUU8TUQxPfEWU7wjH4E8J3gMPC72LTp6SJU8exaFOXBiibyf4MS6EXYaO3DIjjy61by7ACRaO5NvnTvMGB48Dw6wPFEUBr30j4E7niMZvIZC87s7EpS8OZnlPJZxgrxug9U7/DDUvNrxL7yV14e3E2c7PBdaQTwT8HE8oIuDPGIB6rvMB9o6cR+1OwbCHrylfgm8z6M5vIiqXbxFG1G8a9WIPItp7rpGT8Y838GEvAoK5jyAG3g7xRJvPPxBGLzJWQ28XYWtO85vRLp0IZq8cR81vc7mDb28PSe89LKyuig1uDyxEuK8GlwmPIbKgLwHGcW7/qkCvC8ZXzzSyE89F8BGOxPw8Tx+Ktc8BkvVurXiNryRkOk8jyj/OcKH0zp69Pw8apDPPFuUjLwPDrC8xuBeuD43KrxuYKQ7qXGPvF0OZDx1VQ88VVzNvD9rn7ushWE7EZlLvSL9+DrHi528dzXsu3k30bzeFka7hrm8vD3gAz1/Xsy80D20PNPZE7sorAG86WS8u2Y3xDtvHVC7PKwOO5DkAT3KOeo8c+0kvI+fyLuY61k8SKbsO4TrzLrrZqE87O9XvMkF9Tynb6q847SKvBjjdzyhSK88zTtPPNNzjjsvGV87UQPCvMD8t7stn4e7GRftPBQkZ7x4eiW7sqzcu3ufO7yAG3g8OHa0u0T4n7wcxJC7r6r3vAbCnrth3rg7BxnFumqQzzyXyCi8V8Q3vEPEqjyIu6E8Ac+YvGR6GLulkHY8um83PMqNgrv5pTi8N7kIPOhTeLy6TIY8B5COvDLGArvEzAy9IbcWvIUfQjxQ4BC7B/aTvCfwfrz15ie8ucR4PD1pursLtSS8AgMOOzIsiLv0srI7Q01hPCvRF7vySsg6O5tKunh6JTvCZCI7xuDevLc53btvLhQ8/pi+PJU9Dbugi4O8Qn/xvLpMhrth3ji8n/GIPKouu7tBS3y853MbPGAQyTt27wk7iokRO8d62bzZRnG7sN5svAG+1Lqvqve8JGXjur0Ll7tCf/E75/xRPIWFx7wgDNi8ucT4OZNvHb2nktu8qrfxuyR2J7zWh2A6juKcPDhlcLx/1RU9IAxYPGJ4szylB8C8qfrFO276HjuWcQK9QdOJvCUQIjzjo8a8SeslvBrCKztCf/E66MrBOx1eCz2Xt+Q66YdtvKg9mrrLSq47fFznO1uUjDsoNTg8QyqwuzH4Ejz/Zi67A8A5uKg9GrtFkhq862ahOzSmXzkMDEs8q+vmvNVkLzwc1n28mu0+vCbekTyCg+K7ekgVvO8CAT2yRtc8apBPu1b2R7zUp4M8VW2RvPc9zrx69Hw753ObvCcSB71sG+u8OwHQuv67b7zLSi65HrWxO0ZPRrxmwPq7t7CmPGxvAzygnfC8oIsDvKY7tbwZF+07p2+qvOnbhbv0oW47/2auuThlcDwIxIM8n/EIO6ijH7vHetk7uRiRPGUDT7pgh5I85shcPpGQabykShS7FWmgPPjojDvJ8wc8mlPEOY2uJzt7FoW7HNb9O7rVvDzKjQI80NcuuqvINbvNTBO8TgFdvEJ/cbzEZoe8SVGrvMvkqLyHdui7P2ufvBSbMDw0t6O82GaUPOLmGrxSNze8KVjpuwizPzwqjN48Xh8ovE4B3TtiAeo8azsOO8eLnbyO4py7x/GiPIvgNzzvi7c8BFq0O/dOEj1fU5282ZoJPCL9+LqyIyY8IoUGPNI/mbwKpGC7EkQKuzrN2jwVzyU7QpA1vLIjpjwi64s8HYE8u6eSW7yryLU8yK5OOzysjjwi6wu8GsIrOu7xPDwCaRO8dzVsPP/vZLwT3oQ8cQ7xvOJv0TtWBww8hlM3PBPeBDxT9OK71pgkPPSysrugiwO90GDlvHOHHz3xfNg8904SPVpglzzmP6a7Cgrmu9/BBLyH7bG85QsxvVSfIb2Xt2Q8paG6vOqYsTos9Mi8nqxPu8wHWjuYhdS7GAWAvCIOvTp/bxA8j7CMPG1P4Dxd67I7xxRUvOM9wbxMhwU9Kp0iPfF82LvQYOU6XkJZPBxNx7y0nX28B5COO8FT3rp4eiW8R/oEvSfw/jtC9rq8n/GIux3nQTw8WPY8LBf6uzSmXzzSPxm88rDNvDysDjwyPnW7tdFyPBLNwDo8WHa8bPi5vOO0CrylGAQ8YgFqvEFLfDy7LOO7TIeFPAHPmDv3YP+6/+9kPBKqjzt5rpo8VJ+hvE7eKzyc3t88P2sfvLQUR7wJ1vC6exaFvD6dr7zNO888i+A3ulwuhzuF/JC8gKMFveoyLLxqBxk7YgFquws+2zwOUYS8agcZvGJ4M71AjtC747QKvAizP73UH3a7LvatPJBtuLzEzIy8bG8DvJEHM75E59s7zbIYPObZIL2uZJW7WRveugblTzy6TIa802JKvD9rH7xlA088QAWavIFP7bwL2FW8vqWRu0ZgijyRkGm7
ZGnUvIeHLD1c2m48THbBPPkcAr1NzWc8+JT0uulkvLvXMp+7lU96u7kYET1xhTo8e3wKvItGPTxb+hG87mgGPWqhk7uhrrQ73rBAPCbNTT13rDW8K8DTus8s8DsNt4k8gpQmPLES4ryyvSA8lcbDO60woDyLVwE9BFq0u+cNFj3C7Vi8UXoLPDYOyryQ0z083+S1Ox34hTzEzIw7pX4Ju6ouuzxIpmw8w5iXuylYaTy5sgu9Js3NOo+fyLyjFp+8MMSdvOROBb2n+OA7b7fKOeIJzDoNpkW8WsYct7SdfTxXxLc7TO2KO3YB9zynktu7OkSkPKnXFLvtRv47AJujuzGSDT0twjg8AgOOO4d26DvpZDy8lAkYPI5r0zcGS9W8OGXwu9xIVjyH7TG9IUDNuiqMXrwb9qA79I+BPL1xHLuVPY07MOfOO0ztCruvMoW8BuXPu4AbeLyIRNg8uG3SPO5XQjuFH0K8zm9EPEAoSz0tKL652ZqJOgABqbwsjsM8mlPEPLewpjsVWNw8OGXwOlYHjLzfwQQ81iFbOyJ0Qj3d85S7cQ7xvIqswjxKhSC7906SvAFYz72xiau8LAWNPB1eCz09jGu72ZoJPfDiXTwPDrA8CYGvvNH6XzxTa6y8+RwCvY8of7xxDnG8Ef/QvJ9p+zqh0eU8a16/OzBN1LyDLiE9PFh2u+0jTbxLUxA9ZZ3JvItXgbqL4Dc8BuXPvKnXFDzmPyY8k/hTOlum+bqAksG8OZnluPmluLxRnTy6/KcdvKAUOrzRcSm8fqEgPcTeebzeOXc8KCR0OnN2W7xRA0K8Wsacu+M9wToyLIi8mTATu21P4LuadvW8Dtq6vPmlODsjqLe88ieXPJEHszySoa08U/RiPNQNCbwb9qC8bG+DOXW7FL0OdLW7Tc3nvG8dULsAJNo7fNMwO7sJMr2O4hy85ZTnuwAkWjw+Nyq8rcoaO+8lsrvx86E8U/TivGUUkzp6SJW8lT0NvWz4uTzeFka6qguKvIKD4rt/1ZU8LBf6vD6dr7es/Ko7qWBLvIlVHDxwUUU6Jt4RvRJEijnRcSk88235PGvVCL3zbfm8DaZFO+7xvLs3qES8oznQO9XKNDxZLKK8IIMhvComWb0CAw48fDk2O+nbBb29C5e8ogVbu1EUBryYhdS7OTPgOul1AD25sgs7i1cBPBYmzLtSroA8hfyQvP3bErz9h/o82ZoJO7/ZhjxtT+A8UZ28uzaFk7wJ1nA6dd7FPGg5Kbwb9iC8psRrvBXyVjzGRuS8uAfNu0+smzvFAAK96FN4vC2fhzy65oC7tgXou/9mLjxMELw8GSgxPRBlVjxDxCq80j8ZveinkDxHgzu70j8ZvPGNnDyPn0i8Vn9+urXR8ju10fI7sRJiPDBemLt8OTa8tJ39O4ne0rsaXKa7t0ohPHQhGrdYXjI824sqvDw1RT2/2YY8E/BxPIUOfjv9dQ08PM8/PMwYHrwwXpi7nqxPPM8aA7w+wOC7ROdbO79iPTxVbRE8U45dPOOjRjxwYok8ME1Uu1SfIbyifKQ8UXqLPI85wzsITTq8R+lAPMRVQzzcv58892B/Oqg9mjw3MXu7P9EkvM6AiLyx7zA8eHolPLYWLLugFLq8AJsjvEOzZjk6RKQ8uRgRPXVVjzw0HSk9PWk6PLss47spzzK93rBAvJpTxDun+OC7OTPgvEa1yzvAH+k5fZDcOid4jLuN0di8N7kIPPe0F7wVaSC8zxoDvJVgvrvUpwO9dd7FPKUHQLxn4oI7Ng7KPIydYzzZRvE8LTkCu3bvCTy10fK7QAWaPGHeOLu6+O27omvgO8Rmh7xrXj87AzeDvORg8jnGRuS8UEYWPLPg0TvYZpQ9FJuwPLC7O7xug1U8bvoevAnW8DvxFtM8kEoHPDxYdrzcWZq8n3q/O94nCjvZI0C82yUlvayWpbyHh6y7ME1UO9b+KTzbFGG89oCiPFpgFzzhTKA84gnMPKgsVjyia+C7XNpuPHxc5zyDLqG8ukyGvKqUQLwG5U88wB/pO+B+ML2O4py8MOdOPHt8irsDnYg6rv6PumJ4szzuV0I80qWePKTkDj14A9y8fqEgu9DXLjykbUU7yEhJvLYFaLyfVw68", + "index": 0, + "object": "embedding", + } + ], + "model": "text-embedding-ada-002-v2", + "object": "list", + "usage": {"prompt_tokens": 6, "total_tokens": 6}, + }, + ), + "You are a scientist.": ( + { + "Content-Type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "1469", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "199", + "x-ratelimit-remaining-tokens": "39940", + "x-ratelimit-reset-requests": "7m12s", + "x-ratelimit-reset-tokens": "90ms", + "x-request-id": "49dbbffbd3c3f4612aa48def69059ccd", + }, + { + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "212 degrees " "Fahrenheit is " "equal to 100 " "degrees " "Celsius.", + "role": "assistant", + }, + } + ], + "created": 1696888863, + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "model": "gpt-3.5-turbo-0613", + "object": "chat.completion", + "usage": {"completion_tokens": 11, "prompt_tokens": 53, "total_tokens": 64}, + }, + ), +} + + +def simple_get(self): + content_len = int(self.headers.get("content-length")) + content = json.loads(self.rfile.read(content_len).decode("utf-8")) + + prompt = extract_shortened_prompt(content) + if not prompt: + self.send_response(500) + self.end_headers() + self.wfile.write("Could not 
parse prompt.".encode("utf-8")) + return + + headers, response = ({}, "") + for k, v in RESPONSES.items(): + if prompt.startswith(k): + headers, response = v + break + else: # If no matches found + self.send_response(500) + self.end_headers() + self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8")) + return + + # Send response code + self.send_response(200) + + # Send headers + for k, v in headers.items(): + self.send_header(k, v) + self.end_headers() + + # Send response body + self.wfile.write(json.dumps(response).encode("utf-8")) + return + + +def extract_shortened_prompt(content): + prompt = ( + content.get("prompt", None) + or content.get("input", None) + or "\n".join(m["content"] for m in content.get("messages")) + ) + return prompt.lstrip().split("\n")[0] + + +class MockExternalOpenAIServer(MockExternalHTTPServer): + # To use this class in a test one needs to start and stop this server + # before and after making requests to the test app that makes the external + # calls. + + def __init__(self, handler=simple_get, port=None, *args, **kwargs): + super(MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs) + + +if __name__ == "__main__": + with MockExternalOpenAIServer() as server: + print("MockExternalOpenAIServer serving on port %s" % str(server.port)) + while True: + pass # Serve forever diff --git a/tox.ini b/tox.ini index ef7548cb55..8431b5ebd6 100644 --- a/tox.ini +++ b/tox.ini @@ -209,6 +209,8 @@ deps = component_flask_rest: jinja2 component_flask_rest: itsdangerous component_flask_rest-flaskrestxlatest: flask-restx + ; Pin Flask version until flask-restx is updated to support v3 + component_flask_rest-flaskrestxlatest: flask<3.0 component_flask_rest-flaskrestx051: flask-restx<1.0 component_graphqlserver: graphql-server[sanic,flask]==3.0.0b5 component_graphqlserver: sanic>20 @@ -342,7 +344,7 @@ deps = framework_tornado: pycurl framework_tornado-tornadolatest: tornado framework_tornado-tornadomaster: https://github.com/tornadoweb/tornado/archive/master.zip - mlmodel_openai: openai + mlmodel_openai: openai[datalib] logger_loguru-logurulatest: loguru logger_loguru-loguru06: loguru<0.7 logger_loguru-loguru05: loguru<0.6