feat: testing, comments and prepare for release (#13)
* feat: testing, comments and prepare for release

* resolve comments
qingzhuozhen authored Aug 15, 2022
1 parent db8c20b commit 1f38faf
Showing 14 changed files with 202 additions and 24 deletions.
1 change: 1 addition & 0 deletions MANIFEST.in
@@ -0,0 +1 @@
graft src/amplitude_experiment
37 changes: 30 additions & 7 deletions README.md
@@ -15,20 +15,24 @@ Amplitude Python Server SDK for Experiment.
pip install amplitude-experiment
```

## Quick Start
## Remote Evaluation Quick Start
```python
from amplitude_experiment import Experiment, Config, Client, User
from amplitude_experiment import Experiment, RemoteEvaluationConfig, RemoteEvaluationClient, User

# (1) Get your deployment's API key
api_key = 'YOUR-API-KEY'

# (2) Initialize the experiment client
experiment = Experiment.initialize(api_key)
# (2) Initialize the remote evaluation client
experiment = Experiment.initialize_remote(api_key)

# (3) Fetch variants for a user
user = User(device_id="abcdefg", user_id="[email protected]", user_properties={
'premium': True
})
user = User(
device_id="abcdefg",
user_id="[email protected]",
user_properties={
'premium': True
}
)

# (4) Lookup a flag's variant
#
@@ -52,8 +56,27 @@ def fetch_callback(user, variants):
else:
# Flag is off

```
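
The callback body above is truncated in this diff view. As a rough sketch of the synchronous path (not shown in this commit), assuming the remote client also exposes a blocking `fetch(user)` that returns a dict of `Variant` objects with a `value` attribute:

```python
# Sketch only: fetch(user) and Variant.value are assumptions not confirmed by
# this diff; check the Developer Center docs for the exact API.
variants = experiment.fetch(user)
variant = variants.get('YOUR-FLAG-KEY')
if variant is not None and variant.value == 'on':
    pass  # Flag is on
else:
    pass  # Flag is off
```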

## Local Evaluation Quick Start
```python
# (1) Initialize the local evaluation client with a server deployment key.
experiment = Experiment.initialize_local(api_key)

# (2) Start the local evaluation client.
experiment.start()

# (3) Evaluate a user.
user = User(
device_id="abcdefg",
user_id="[email protected]",
user_properties={
'premium': True
}
)
variants = experiment.evaluate(user)
```
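
Not visible in the README diff: this commit also renames the local client's `close()` to `stop()` and keeps context-manager support, so shutdown can look like the following sketch (reusing the `api_key` and `user` from above; the flag key is a placeholder):

```python
# (4) Stop the client when finished: stops the flag config poller and closes
# the connection pool.
experiment.stop()

# Or, as a sketch of the context-manager form (__exit__ calls stop()):
from amplitude_experiment import LocalEvaluationClient

with LocalEvaluationClient(api_key) as client:
    client.start()
    variants = client.evaluate(user, ['YOUR-FLAG-KEY'])
```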

## More Information
Please visit our :100:[Developer Center](https://www.docs.developers.amplitude.com/experiment/sdks/python-sdk/) for more instructions on using the SDK.

1 change: 1 addition & 0 deletions setup.py
@@ -35,6 +35,7 @@
keywords="amplitude, python, backend",
package_dir={"": "src"},
packages=["amplitude_experiment"],
include_package_data=True,
python_requires=">=3.6, <4",
license='MIT License',
project_urls={
2 changes: 1 addition & 1 deletion src/amplitude_experiment/connection_pool.py
@@ -130,7 +130,7 @@ def close(self) -> None:
self.stop_clear_conn()
pool, self._pool = self._pool, None
for conn in pool:
conn.close()
conn.stop()

def clear_idle_conn(self) -> None:
if self.is_closed:
Empty file.
40 changes: 32 additions & 8 deletions src/amplitude_experiment/local/client.py
@@ -1,7 +1,7 @@
import json
import logging
from threading import Lock
from typing import Any, List
from typing import Any, List, Dict

from .config import LocalEvaluationConfig
from ..user import User
@@ -12,7 +12,18 @@


class LocalEvaluationClient:
def __init__(self, api_key, config=None):
"""Experiment client for evaluating variants for a user locally."""

def __init__(self, api_key: str, config: LocalEvaluationConfig = None):
"""
Creates a new Experiment LocalEvaluationClient instance.
Parameters:
api_key (str): The environment API Key
config (LocalEvaluationConfig): Config Object
Returns:
Experiment Client instance.
"""
if not api_key:
raise ValueError("Experiment API key is empty")
self.api_key = api_key
@@ -27,10 +38,23 @@ def __init__(self, api_key, config=None):
self.lock = Lock()

def start(self):
"""
Fetch initial flag configurations and start polling for updates. You must call this function to begin
polling for flag config updates.
"""
self.__do_rules()
self.poller.start()

def evaluate(self, user: User, flag_keys: List[str] = None):
def evaluate(self, user: User, flag_keys: List[str] = None) -> Dict[str, Variant]:
"""
Locally evaluates flag variants for a user.
Parameters:
user (User): The user to evaluate
flag_keys (List[str]): The flags to evaluate with the user. If empty, all flags from the flag cache are evaluated.
Returns:
The evaluated variants.
"""
no_flag_keys = flag_keys is None or len(flag_keys) == 0
rules = []
for key, value in self.rules.items():
@@ -59,10 +83,10 @@ def __do_rules(self):
body = None
self.logger.debug('[Experiment] Get flag configs')
try:
response = conn.request('POST', '/sdk/rules?eval_mode=local', body, headers)
response = conn.request('GET', '/sdk/rules?eval_mode=local', body, headers)
response_body = response.read().decode("utf8")
if response.status != 200:
raise Exception(f"flagConfigs - received error response: ${response.status}: ${response_body}")
raise Exception(f"[Experiment] Get flagConfigs - received error response: ${response.status}: ${response_body}")
self.logger.debug(f"[Experiment] Got flag configs: {response_body}")
parsed_rules = self.__parse(json.loads(response_body))
self.lock.acquire()
@@ -83,9 +107,9 @@ def __setup_connection_pool(self):
self._connection_pool = HTTPConnectionPool(host, max_size=1, idle_timeout=30,
read_timeout=timeout, scheme=scheme)

def close(self) -> None:
def stop(self) -> None:
"""
Close resource like connection pool with client
Stop polling for flag configurations and close resources such as the connection pool.
"""
self.poller.stop()
self._connection_pool.close()
@@ -94,4 +118,4 @@ def __enter__(self) -> 'LocalEvaluationClient':
return self

def __exit__(self, *exit_info: Any) -> None:
self.close()
self.stop()
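
For context (not part of the diff): a minimal usage sketch of the `evaluate` API documented above. The flag key comes from the CI tests below, and treating `Variant` as exposing `value` and `payload` attributes is an assumption based on the `Variant('on', 'payload')` constructor used in those tests.

```python
from amplitude_experiment import LocalEvaluationClient, User

client = LocalEvaluationClient('YOUR-SERVER-DEPLOYMENT-KEY')
client.start()  # fetch initial flag configs and begin polling

user = User(user_id='[email protected]', device_id='abcdefg')
variants = client.evaluate(user, ['sdk-local-evaluation-ci-test'])

variant = variants.get('sdk-local-evaluation-ci-test')
if variant is not None:
    print(variant.value, variant.payload)  # assumed attribute names

client.stop()  # stop polling and close the connection pool
```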
24 changes: 20 additions & 4 deletions src/amplitude_experiment/local/config.py
@@ -1,10 +1,26 @@
class LocalEvaluationConfig:
"""Experiment Local Client Configuration"""

DEFAULT_SERVER_URL = 'https://api.lab.amplitude.com'

def __init__(self, debug=False,
server_url=DEFAULT_SERVER_URL,
flag_config_polling_interval_millis=30000,
flag_config_poller_request_timeout_millis=10000):
def __init__(self, debug: bool = False,
server_url: str = DEFAULT_SERVER_URL,
flag_config_polling_interval_millis: int = 30000,
flag_config_poller_request_timeout_millis: int = 10000):
"""
Initialize a config
Parameters:
debug (bool): Set to true to log some extra information to the console.
server_url (str): The server endpoint from which to request variants.
flag_config_polling_interval_millis (int): The interval in milliseconds to poll the amplitude server for
flag config updates. These rules are stored in memory and used when calling evaluate()
to perform local evaluation.
flag_config_poller_request_timeout_millis (int): The request timeout, in milliseconds,
used when fetching variants.
Returns:
The config object
"""
self.debug = debug
self.server_url = server_url
self.flag_config_polling_interval_millis = flag_config_polling_interval_millis
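
Not part of the diff: a short sketch constructing this config and passing it to the client shown above. The override values are arbitrary, and importing `LocalEvaluationConfig` from the package root is an assumption (mirroring how `RemoteEvaluationConfig` is imported in the README).

```python
from amplitude_experiment import LocalEvaluationClient, LocalEvaluationConfig

config = LocalEvaluationConfig(
    debug=True,                                      # extra console logging
    flag_config_polling_interval_millis=60000,       # poll flag configs every 60s
    flag_config_poller_request_timeout_millis=5000,  # 5s request timeout
)
client = LocalEvaluationClient('YOUR-SERVER-DEPLOYMENT-KEY', config)
```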
1 change: 1 addition & 0 deletions src/amplitude_experiment/local/evaluation/__init__.py
@@ -0,0 +1 @@

3 changes: 3 additions & 0 deletions src/amplitude_experiment/local/poller.py
@@ -3,6 +3,9 @@


class Poller:
"""
Poller to run a function every interval
"""
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
Empty file.
2 changes: 1 addition & 1 deletion src/amplitude_experiment/remote/config.py
@@ -1,5 +1,5 @@
class RemoteEvaluationConfig:
"""Experiment Client Configuration"""
"""Experiment Remote Client Configuration"""

DEFAULT_SERVER_URL = 'https://api.lab.amplitude.com'

2 changes: 1 addition & 1 deletion tests/factory_test.py
@@ -15,7 +15,7 @@ def test_singleton_local_instance(self):
client1 = Experiment.initialize_local(API_KEY)
client2 = Experiment.initialize_local(API_KEY)
self.assertEqual(client1, client2)
client1.close()
client1.stop()


if __name__ == '__main__':
102 changes: 102 additions & 0 deletions tests/local/benchmark_test.py
@@ -0,0 +1,102 @@
import random
import time
import unittest

from src.amplitude_experiment import LocalEvaluationClient, User

API_KEY = 'server-qz35UwzJ5akieoAdIgzM4m9MIiOLXLoz'


def random_boolean():
return bool(random.getrandbits(1))


def measure(function, *args, **kwargs):
start = time.time()
function(*args, **kwargs)
elapsed = (time.time() - start) * 1000
return elapsed


def random_string(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.choice(letters) for i in range(length))


def random_experiment_user():
n = 15
user = User(user_id=random_string(n))
if random_boolean():
user.device_id = random_string(n)
if random_boolean():
user.platform = random_string(n)
if random_boolean():
user.version = random_string(n)
if random_boolean():
user.os = random_string(n)
if random_boolean():
user.device_manufacturer = random_string(n)
if random_boolean():
user.device_model = random_string(n)
if random_boolean():
user.device_brand = random_string(n)
if random_boolean():
user.user_properties = {
'test': 'test'
}
return user


def random_benchmark_flag():
n = random.randint(1, 4)
return f"local-evaluation-benchmark-{n}"


class BenchmarkTestCase(unittest.TestCase):
_local_evaluation_client: LocalEvaluationClient = None

@classmethod
def setUpClass(cls) -> None:
cls._local_evaluation_client = LocalEvaluationClient(API_KEY)
cls._local_evaluation_client.start()

@classmethod
def tearDownClass(cls) -> None:
cls._local_evaluation_client.stop()

def test_evaluate_benchmark_1_flag_smaller_than_10_ms(self):
user = random_experiment_user()
flag = random_benchmark_flag()
duration = measure(self._local_evaluation_client.evaluate, user, [flag])
self.assertTrue(duration < 10)

def test_evaluate_benchmark_10_flag_smaller_than_10_ms(self):
total = 0
for i in range(10):
user = random_experiment_user()
flag = random_benchmark_flag()
duration = measure(self._local_evaluation_client.evaluate, user, [flag])
total += duration
self.assertTrue(total < 10)

def test_evaluate_benchmark_100_flag_smaller_than_100_ms(self):
total = 0
for i in range(100):
user = random_experiment_user()
flag = random_benchmark_flag()
duration = measure(self._local_evaluation_client.evaluate, user, [flag])
total += duration
self.assertTrue(total < 100)

def test_evaluate_benchmark_1000_flag_smaller_than_1000_ms(self):
total = 0
for i in range(1000):
user = random_experiment_user()
flag = random_benchmark_flag()
duration = measure(self._local_evaluation_client.evaluate, user, [flag])
total += duration
self.assertTrue(total < 1000)


if __name__ == '__main__':
unittest.main()
11 changes: 9 additions & 2 deletions tests/local/client_test.py
@@ -6,7 +6,6 @@


class LocalEvaluationClientTestCase(unittest.TestCase):

_local_evaluation_client: LocalEvaluationClient = None

@classmethod
@@ -16,7 +15,10 @@ def setUpClass(cls) -> None:

@classmethod
def tearDownClass(cls) -> None:
cls._local_evaluation_client.close()
cls._local_evaluation_client.stop()

def test_initialize_raise_error(self):
self.assertRaises(ValueError, LocalEvaluationClient, "")

def test_evaluate_all_flags_success(self):
variants = self._local_evaluation_client.evaluate(test_user)
@@ -28,6 +30,11 @@ def test_evaluate_one_flag_success(self):
expected_variant = Variant('on', 'payload')
self.assertEqual(expected_variant, variants.get('sdk-local-evaluation-ci-test'))

def test_invalid_api_key_throw_exception(self):
invalid_local_api_key = 'client-DvWljIjiiuqLbyjqdvBaLFfEBrAvGuA3'
with LocalEvaluationClient(invalid_local_api_key) as test_client:
self.assertRaises(Exception, test_client.start, "[Experiment] Get flagConfigs - received error response")


if __name__ == '__main__':
unittest.main()
