Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/main' into dummy-test-change-2
Browse files Browse the repository at this point in the history
  • Loading branch information
grischperl committed Dec 12, 2024
2 parents cf79bbf + 05c366b commit f022733
Show file tree
Hide file tree
Showing 39 changed files with 905 additions and 436 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/unit-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,5 +39,10 @@ jobs:
- name: Install dependencies
run: poetry install --with dev

- name: Create config.json
run: |
mkdir -p config
echo '{"mock-key": "mock-value"}' > config/config.json
- name: Run tests
run: poetry run poe test
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,9 @@ ENV/
env.bak/
venv.bak/

# Configuration
config/config.json

# Spyder project settings
.spyderproject
.spyproject
Expand Down
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,8 @@ Because the companion uses the FastAPI framework, read the following documentation

### Configuration

For local development, LLMs can be configured inside the `config/models.yml` file.
For local development, you can configure LLMs by modifying the `config/config.json` file.
To use a configuration file from a different location, set the `CONFIG_PATH` environment variable to the path of your desired JSON configuration file.

## Code Checks

Expand Down
9 changes: 0 additions & 9 deletions config/config.yml
Original file line number Diff line number Diff line change
@@ -1,9 +0,0 @@
models:
- name: gpt-4o
deployment_id: d92ed4e1d4bafffc
temperature: 0
- name: gpt-4o-mini
deployment_id: d3da334def2bb9c8
temperature: 0
- name: text-embedding-3-large
deployment_id: d6a35fa3fd8ca8b2
19 changes: 17 additions & 2 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 3 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ langfuse = "^2.39.1"
langgraph = "^0.2.5"
poetry-plugin-sort = "^0.2.1"
pytest = "^8.2.2"
pytest-rerunfailures = "^15.0"
python-decouple = "^3.8"
redis = "^5.0.8"
requests = "^2.32.3"
Expand All @@ -61,9 +62,9 @@ ipython = "^8.26.0"
langfuse = "^2.38.0"
mypy = "^1.10.1"
poethepoet = "^0.27.0"
pytest-asyncio = "^0.23.0"
ruff = "v0.4.10"
types-requests = "^2.32.0.20240712"
pytest-asyncio = "^0.23.0"

[tool.pytest.ini_options]
minversion = "6.0"
Expand All @@ -84,9 +85,8 @@ format-fix = "black ."
lint-fix = "ruff check . --fix"
code-fix = ["format-fix", "lint-fix"]
test = "pytest tests/unit"
test-integration = "pytest tests/integration"
test-integration = "pytest tests/integration --reruns=3 --reruns-delay=30 -r=aR"
run = "fastapi run src/main.py --port 8000"
run-local = "fastapi dev src/main.py --port 8000"
sort = "poetry sort"
pre-commit-check = ["sort", "code-fix", "codecheck", "test"]

10 changes: 5 additions & 5 deletions scripts/k8s/companion-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -97,21 +97,21 @@ spec:
- name: REDIS_PORT
value: "6379"
- name: CONFIG_PATH
value: "/mnt/config/models-config.yml"
value: "/mnt/config/models-config.json"
envFrom:
- configMapRef:
name: ai-backend-config
volumeMounts:
- name: models-config
mountPath: /mnt/config/models-config.yml
subPath: models-config.yml
mountPath: /mnt/config/models-config.json
subPath: models-config.json
volumes:
- name: models-config
configMap:
name: ai-backend-config
items:
- key: models-config.yml
path: models-config.yml
- key: models-config.json
path: models-config.json

---
apiVersion: v1
Expand Down
6 changes: 5 additions & 1 deletion src/agents/common/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

FINAL_RESPONSE = "final_response"

GRAPH_STEP_TIMEOUT_SECONDS = 30
GRAPH_STEP_TIMEOUT_SECONDS = 60

IS_LAST_STEP = "is_last_step"

Expand All @@ -31,3 +31,7 @@
QUERY = "query"

OWNER = "owner"

K8S_AGENT = "KubernetesAgent"

KYMA_AGENT = "KymaAgent"
15 changes: 8 additions & 7 deletions src/agents/common/state.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
from collections.abc import Sequence
from enum import Enum
from typing import Annotated
from typing import Annotated, Literal

from langchain_core.messages import BaseMessage
from langchain_core.pydantic_v1 import BaseModel, Field
from langgraph.graph import add_messages
from langgraph.managed import IsLastStep

from agents.common.constants import K8S_CLIENT
from agents.common.constants import COMMON, K8S_AGENT, K8S_CLIENT, KYMA_AGENT
from services.k8s import IK8sClient


Expand All @@ -21,10 +21,11 @@ class SubTaskStatus(str, Enum):
class SubTask(BaseModel):
"""Sub-task data model."""

description: str = Field(description="description of the task")
assigned_to: str = Field(description="agent to whom the task is assigned")
description: str = Field(
description="user query with original wording for the assigned agent"
)
assigned_to: Literal[KYMA_AGENT, K8S_AGENT, COMMON] # type: ignore
status: str = Field(default=SubTaskStatus.PENDING)
result: str | None

def complete(self) -> None:
"""Update the result of the task."""
Expand Down Expand Up @@ -64,11 +65,11 @@ class Plan(BaseModel):
"""Plan to follow in future"""

subtasks: list[SubTask] | None = Field(
description="different steps/subtasks to follow, should be in sorted order"
description="different subtasks for user query"
)

response: str | None = Field(
description="direct response of planner if plan is unnecessary"
description="only if query is not related to Kyma and Kubernetes"
)


Expand Down
4 changes: 1 addition & 3 deletions src/agents/k8s/agent.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,11 @@
from agents.common.agent import BaseAgent
from agents.common.constants import GRAPH_STEP_TIMEOUT_SECONDS
from agents.common.constants import GRAPH_STEP_TIMEOUT_SECONDS, K8S_AGENT
from agents.k8s.prompts import K8S_AGENT_PROMPT
from agents.k8s.state import KubernetesAgentState
from agents.k8s.tools.logs import fetch_pod_logs_tool
from agents.k8s.tools.query import k8s_query_tool
from utils.models.factory import IModel

K8S_AGENT = "KubernetesAgent"


class KubernetesAgent(BaseAgent):
"""Kubernetes agent class."""
Expand Down
4 changes: 1 addition & 3 deletions src/agents/kyma/agent.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@
from langchain_core.embeddings import Embeddings

from agents.common.agent import BaseAgent
from agents.common.constants import GRAPH_STEP_TIMEOUT_SECONDS
from agents.common.constants import GRAPH_STEP_TIMEOUT_SECONDS, KYMA_AGENT
from agents.kyma.prompts import KYMA_AGENT_PROMPT
from agents.kyma.state import KymaAgentState
from agents.kyma.tools.query import kyma_query_tool
from agents.kyma.tools.search import SearchKymaDocTool
from utils.models.factory import IModel, ModelType

KYMA_AGENT = "KymaAgent"


class KymaAgent(BaseAgent):
"""Kyma agent class."""
Expand Down
44 changes: 18 additions & 26 deletions src/agents/supervisor/agent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from typing import Any, Literal, cast

from langchain_core.embeddings import Embeddings
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
Expand All @@ -12,14 +11,20 @@
from langgraph.graph.graph import CompiledGraph

from agents.common.constants import (
COMMON,
FINALIZER,
K8S_AGENT,
KYMA_AGENT,
MESSAGES,
NEXT,
PLANNER,
)
from agents.common.state import Plan
from agents.common.utils import create_node_output, filter_messages
from agents.supervisor.prompts import FINALIZER_PROMPT, PLANNER_PROMPT
from agents.supervisor.prompts import (
FINALIZER_PROMPT,
PLANNER_PROMPT,
)
from agents.supervisor.state import SupervisorState
from utils.filter_messages import (
filter_messages_via_checks,
Expand Down Expand Up @@ -126,25 +131,24 @@ def _create_planner_chain(self, model: IModel) -> RunnableSequence:
MessagesPlaceholder(variable_name="messages"),
]
).partial(
members=self._get_members_str(),
output_format=self.plan_parser.get_format_instructions(),
kyma_agent=KYMA_AGENT, kubernetes_agent=K8S_AGENT, common_agent=COMMON
)
return self.planner_prompt | model.llm # type: ignore
return self.planner_prompt | model.llm.with_structured_output(Plan) # type: ignore

async def _invoke_planner(self, state: SupervisorState) -> AIMessage:
async def _invoke_planner(self, state: SupervisorState) -> Plan:
"""Invoke the planner."""

filtered_messages = filter_messages_via_checks(
state.messages, [is_human_message, is_system_message, is_finalizer_message]
)
reduces_messages = filter_most_recent_messages(filtered_messages, 10)

response: AIMessage = await self._planner_chain.ainvoke(
plan: Plan = await self._planner_chain.ainvoke(
input={
"messages": reduces_messages,
},
)
return response
return plan

async def _plan(self, state: SupervisorState) -> dict[str, Any]:
"""
Expand All @@ -154,29 +158,17 @@ async def _plan(self, state: SupervisorState) -> dict[str, Any]:
state.error = None

try:
plan_response = await self._invoke_planner(
plan = await self._invoke_planner(
state, # last message is the user query
)
# get the content of the AIMessage
response_content = str(plan_response.content)

try:
# try to parse the JSON formatted Planner response into a Plan object
plan = self.plan_parser.parse(response_content)
# if the Planner responds directly, return the response and exit the graph
if plan.response:
return create_node_output(
message=AIMessage(content=plan.response, name=PLANNER),
next=END,
)
except OutputParserException as ope:
logger.debug(f"Problem in parsing the planner response: {ope}")
# If 'response' field of the content of plan_response is missing due to ModelType inconsistency,
# the response is read from the plan_response content.

# return the response if planner responded directly
if plan.response:
return create_node_output(
message=AIMessage(content=response_content, name=PLANNER),
message=AIMessage(content=plan.response, name=PLANNER),
next=END,
)

# if the Planner did not respond directly but also failed to create any subtasks, raise an exception
if not plan.subtasks:
raise Exception(
Expand Down
Loading

0 comments on commit f022733

Please sign in to comment.