Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add locust for testing user/connection scaling #1742

Merged
merged 2 commits into from
Sep 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
128 changes: 128 additions & 0 deletions locust_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
import random
import string

from locust import HttpUser, between, task

from memgpt.constants import BASE_TOOLS, DEFAULT_HUMAN, DEFAULT_PERSONA
from memgpt.schemas.agent import AgentState, CreateAgent
from memgpt.schemas.memgpt_request import MemGPTRequest
from memgpt.schemas.memgpt_response import MemGPTResponse
from memgpt.schemas.memory import ChatMemory
from memgpt.schemas.message import MessageCreate, MessageRole
from memgpt.utils import get_human_text, get_persona_text


class MemGPTUser(HttpUser):
    """Locust virtual user that load-tests the MemGPT server.

    On start it provisions a brand-new user, API key, and agent via the
    admin endpoints; its single task then repeatedly sends a "hello"
    message to that agent.
    """

    # Each simulated user waits 1-5 seconds between task invocations.
    wait_time = between(1, 5)
    token = None
    agent_id = None

    def on_start(self):
        """Provision a fresh user, API key, and agent before the task loop begins."""
        # Authenticate as admin (server password) to create the user.
        # NOTE(review): "password" is assumed to be the test server's admin
        # password — confirm against the deployment under test.
        self.client.headers = {"Authorization": "Bearer password"}
        user_data = {"name": f"User-{''.join(random.choices(string.ascii_lowercase + string.digits, k=8))}"}
        response = self.client.post("/admin/users", json=user_data)
        response_json = response.json()
        print(response_json)
        self.user_id = response_json["id"]

        # Create an API key scoped to the new user.
        response = self.client.post("/admin/users/keys", json={"user_id": self.user_id})
        self.token = response.json()["key"]

        # From here on, act as the newly created user rather than the admin.
        self.client.headers = {"Authorization": f"Bearer {self.token}"}

        # Build an agent-creation request with a random name and default memory.
        name = "".join(random.choices(string.ascii_lowercase + string.digits, k=8))
        request = CreateAgent(
            name=f"Agent-{name}",
            tools=BASE_TOOLS,
            memory=ChatMemory(human=get_human_text(DEFAULT_HUMAN), persona=get_persona_text(DEFAULT_PERSONA)),
        )

        # Create the agent. catch_response lets us mark non-200s as Locust
        # failures instead of raising.
        with self.client.post("/api/agents", json=request.model_dump(), headers=self.client.headers, catch_response=True) as response:
            if response.status_code != 200:
                response.failure(f"Failed to create agent: {response.text}")
                # Bugfix: do not try to parse the error body as an AgentState —
                # that would raise and kill this virtual user.
                return
            response_json = response.json()

        agent_state = AgentState(**response_json)
        self.agent_id = agent_state.id
        print("Created agent", self.agent_id, agent_state.name)

    @task(1)
    def send_message(self):
        """Send a single non-streaming 'hello' message to the agent and print usage."""
        messages = [MessageCreate(role=MessageRole("user"), text="hello")]
        request = MemGPTRequest(messages=messages, stream_steps=False, stream_tokens=False, return_message_object=False)

        with self.client.post(
            f"/api/agents/{self.agent_id}/messages", json=request.model_dump(), headers=self.client.headers, catch_response=True
        ) as response:
            if response.status_code != 200:
                response.failure(f"Failed to send message: {response.text}")
                # Bugfix: skip parsing the error body as a MemGPTResponse —
                # pydantic validation of an error payload would raise and
                # crash the task instead of recording the failure.
                return
            memgpt_response = MemGPTResponse(**response.json())

        print("Response", memgpt_response.usage)

# @task(1)
# def send_message_stream(self):

# messages = [MessageCreate(role=MessageRole("user"), text="hello")]
# request = MemGPTRequest(messages=messages, stream_steps=True, stream_tokens=True, return_message_object=True)
# if stream_tokens or stream_steps:
# from memgpt.client.streaming import _sse_post

# request.return_message_object = False
# return _sse_post(f"{self.base_url}/api/agents/{agent_id}/messages", request.model_dump(), self.headers)
# else:
# response = requests.post(f"{self.base_url}/api/agents/{agent_id}/messages", json=request.model_dump(), headers=self.headers)
# if response.status_code != 200:
# raise ValueError(f"Failed to send message: {response.text}")
# return MemGPTResponse(**response.json())
# try:
# response = self.memgpt_client.send_message(message="Hello, world!", agent_id=self.agent_id, role="user")
# except Exception as e:
# with self.client.get("/", catch_response=True) as response:
# response.failure(str(e))

# @task(2)
# def get_agent_state(self):
# try:
# agent_state = self.memgpt_client.get_agent(agent_id=self.agent_id)
# except Exception as e:
# with self.client.get("/", catch_response=True) as response:
# response.failure(str(e))

# @task(3)
# def get_agent_memory(self):
# try:
# memory = self.memgpt_client.get_in_context_memory(agent_id=self.agent_id)
# except Exception as e:
# with self.client.get("/", catch_response=True) as response:
# response.failure(str(e))


# class AdminUser(HttpUser):
# wait_time = between(5, 10)
# token = None
#
# def on_start(self):
# # Authenticate as admin
#         self.client.headers = {"Authorization": "password"}
#
# @task
# def create_user(self):
# user_data = {
# "name": f"User-{''.join(random.choices(string.ascii_lowercase + string.digits, k=8))}"
# }
# self.client.post("/admin/users", json=user_data)
#
# @task
# def get_all_users(self):
# self.client.get("/admin/users")
#
# @task
# def get_all_agents(self):
# self.client.get("/api/admin/agents")
#
6 changes: 3 additions & 3 deletions memgpt/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,11 +155,11 @@ def load(cls, llm_config: Optional[LLMConfig] = None, embedding_config: Optional
llm_config_dict = {k: v for k, v in llm_config_dict.items() if v is not None}
embedding_config_dict = {k: v for k, v in embedding_config_dict.items() if v is not None}
# Correct the types that aren't strings
if llm_config_dict["context_window"] is not None:
if "context_window" in llm_config_dict and llm_config_dict["context_window"] is not None:
llm_config_dict["context_window"] = int(llm_config_dict["context_window"])
if embedding_config_dict["embedding_dim"] is not None:
if "embedding_dim" in embedding_config_dict and embedding_config_dict["embedding_dim"] is not None:
embedding_config_dict["embedding_dim"] = int(embedding_config_dict["embedding_dim"])
if embedding_config_dict["embedding_chunk_size"] is not None:
if "embedding_chunk_size" in embedding_config_dict and embedding_config_dict["embedding_chunk_size"] is not None:
embedding_config_dict["embedding_chunk_size"] = int(embedding_config_dict["embedding_chunk_size"])
# Construct the inner properties
llm_config = LLMConfig(**llm_config_dict)
Expand Down
1 change: 1 addition & 0 deletions memgpt/llm_api/llm_api_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
AgentChunkStreamingInterface,
AgentRefreshStreamingInterface,
)
from memgpt.utils import json_dumps

LLM_API_PROVIDER_OPTIONS = ["openai", "azure", "anthropic", "google_ai", "cohere", "local"]

Expand Down
Loading
Loading