Commit d8cf38a: Improve summary
kongzii committed Sep 9, 2024
1 parent aff9952
Showing 6 changed files with 203 additions and 17 deletions.
labs_api/config.py (2 additions, 12 deletions)

```diff
@@ -1,23 +1,13 @@
-import typing as t
-
-from prediction_market_agent_tooling.tools.utils import check_not_none
+from prediction_market_agent_tooling.config import APIKeys
-from pydantic import SecretStr
-from pydantic_settings import BaseSettings, SettingsConfigDict


-class Config(BaseSettings):
-    model_config = SettingsConfigDict(
-        env_file=".env", env_file_encoding="utf-8", extra="ignore"
-    )
-
+class Config(APIKeys):
     HOST: str = "0.0.0.0"
     PORT: int = 8080
     WORKERS: int = 1
     RELOAD: bool = True
-    SQLALCHEMY_DB_URL: t.Optional[SecretStr] = None
-
-    @property
-    def sqlalchemy_db_url(self) -> SecretStr:
-        return check_not_none(
-            self.SQLALCHEMY_DB_URL, "SQLALCHEMY_DB_URL missing in the environment."
-        )
```
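Config now inherits from pmat's APIKeys, which is itself a pydantic-settings class, so the env-file loading, the SQLALCHEMY_DB_URL field, and the sqlalchemy_db_url accessor all come from the parent; that is what lets this commit delete the local copies. A minimal usage sketch, assuming APIKeys keeps providing those members:

```python
from labs_api.config import Config

# Values are read from the environment / .env file via APIKeys' pydantic-settings machinery.
config = Config()
print(config.HOST, config.PORT)

# Assumed to be inherited from APIKeys: raises if SQLALCHEMY_DB_URL is unset.
db_url = config.sqlalchemy_db_url.get_secret_value()
```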
labs_api/insights.py (69 additions, 1 deletion)

```diff
@@ -1,19 +1,36 @@
 import fastapi
+from langchain.prompts import ChatPromptTemplate
+from langchain_openai import ChatOpenAI
-from loguru import logger
+from prediction_market_agent_tooling.config import APIKeys
+from prediction_market_agent_tooling.loggers import logger
 from prediction_market_agent_tooling.markets.omen.omen_subgraph_handler import (
     HexAddress,
+    OmenMarket,
     OmenSubgraphHandler,
 )
+from prediction_market_agent_tooling.tools.langfuse_ import (
+    get_langfuse_langchain_config,
+    observe,
+)
 from prediction_market_agent_tooling.tools.tavily_storage.tavily_models import (
     TavilyResponse,
+    TavilyStorage,
 )
 from prediction_market_agent_tooling.tools.tavily_storage.tavily_storage import (
-    TavilyStorage,
     tavily_search,
 )
-from prediction_market_agent_tooling.tools.utils import utcnow
+from prediction_market_agent_tooling.tools.utils import (
+    LLM_SUPER_LOW_TEMPERATURE,
+    utcnow,
+)

 from labs_api.insights_cache import MarketInsightsResponseCache
 from labs_api.models import MarketInsightsResponse


+# Don't observe the cached version, as it will always return the same thing that's already been observed.
 def market_insights_cached(
     market_id: HexAddress, cache: MarketInsightsResponseCache
 ) -> MarketInsightsResponse:
@@ -28,6 +45,7 @@ def market_insights_cached(
     return new


+@observe()
 def market_insights(market_id: HexAddress) -> MarketInsightsResponse:
     """Returns market insights for a given market on Omen."""
     try:
@@ -47,8 +65,58 @@ def market_insights(market_id: HexAddress) -> MarketInsightsResponse:
     except Exception as e:
         logger.error(f"Failed to get tavily_response for market `{market_id}`: {e}")
         tavily_response = None
+    try:
+        summary = (
+            tavily_response_to_summary(market, tavily_response)
+            if tavily_response is not None
+            else None
+        )
+    except Exception as e:
+        logger.warning(
+            f"Failed to generate short description for market `{market_id}`: {e}"
+        )
+        summary = None
     return MarketInsightsResponse.from_tavily_response(
         market_id=market_id,
         created_at=utcnow(),
+        summary=summary,
         tavily_response=tavily_response,
     )
+
+
+@observe()
+def tavily_response_to_summary(
+    market: OmenMarket, tavily_response: TavilyResponse
+) -> str:
+    contents = [result.content for result in tavily_response.results]
+
+    llm = ChatOpenAI(
+        model="gpt-4o-2024-08-06",
+        temperature=LLM_SUPER_LOW_TEMPERATURE,
+        api_key=APIKeys().openai_api_key_secretstr_v1,
+    )
+
+    prompt = ChatPromptTemplate.from_template(
+        template="""Based on the information provided, write a very brief, tweet-like summary about the current situation relevant for the prediction market question.
+In the summary:
+- you should include the most important information that you think is relevant for the prediction market question
+- never answer the question, only provide the context for the reader
+- don't include any hashtags or links
+- always end by telling the user to do their own research to make their own decision, in a polite manner guiding them to do so before placing any bets in the prediction market
+Prediction Market question: {question}
+Information: {information}
+"""
+    )
+    messages = prompt.format_messages(
+        question=market.question_title, information=contents
+    )
+    completion = str(
+        llm.invoke(
+            messages, max_tokens=1024, config=get_langfuse_langchain_config()
+        ).content
+    )
+
+    return completion
```
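The core of the change is the new tavily_response_to_summary step: instead of reusing Tavily's own answer field, the service asks GPT-4o for a neutral, tweet-length blurb built from the Tavily result contents. A self-contained sketch of that chain, with the pmat-specific pieces (APIKeys, LLM_SUPER_LOW_TEMPERATURE, the Langfuse callback config) swapped for plain stand-ins, so it runs with only langchain-openai installed and OPENAI_API_KEY set:

```python
from langchain.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI


def summarize(question: str, contents: list[str]) -> str:
    # temperature=0.0 stands in for pmat's LLM_SUPER_LOW_TEMPERATURE constant;
    # the API key is read from the OPENAI_API_KEY environment variable.
    llm = ChatOpenAI(model="gpt-4o-2024-08-06", temperature=0.0)
    prompt = ChatPromptTemplate.from_template(
        "Write a very brief, tweet-like summary of the current situation "
        "relevant for the prediction market question. Never answer the "
        "question, only provide context for the reader.\n"
        "Question: {question}\nInformation: {information}"
    )
    messages = prompt.format_messages(question=question, information=contents)
    return str(llm.invoke(messages, max_tokens=1024).content)


# Example: summarize("Will the Fed cut rates in 2024?", ["snippet one", "snippet two"])
```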
labs_api/main.py (4 additions, 1 deletion)

```diff
@@ -3,14 +3,14 @@
 import fastapi
 import uvicorn
-from config import Config
 from fastapi.middleware.cors import CORSMiddleware
 from prediction_market_agent_tooling.gtypes import HexAddress
 from prediction_market_agent_tooling.loggers import logger

+from labs_api.config import Config
 from labs_api.insights import MarketInsightsResponse, market_insights_cached
 from labs_api.insights_cache import MarketInsightsResponseCache
+from prediction_market_agent_tooling.deploy.agent import initialize_langfuse

 HEX_ADDRESS_VALIDATOR = t.Annotated[
     HexAddress,
@@ -30,6 +30,9 @@ async def lifespan(app: fastapi.FastAPI) -> t.AsyncIterator[None]:
     # At end of the service.
     market_insights_cache.engine.dispose()


+config = Config()
+initialize_langfuse(config.default_enable_langfuse)
+
 app = fastapi.FastAPI(lifespan=lifespan)
 app.add_middleware(
     CORSMiddleware,
```
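main.py now imports Config from its package path and initializes Langfuse at import time, before the FastAPI app is created, so that the @observe() spans in insights.py are actually recorded. The insights route itself is outside this diff; assuming it is exposed roughly as below, a quick smoke test could look like this (the path and market id are illustrative, not taken from the commit):

```python
import requests

resp = requests.get(
    "http://localhost:8080/market-insights",  # hypothetical path, not shown in this diff
    params={"market_id": "0x0000000000000000000000000000000000000000"},  # placeholder market id
)
print(resp.json()["summary"])  # the newly populated LLM-written field
```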
labs_api/models.py (2 additions, 1 deletion)

```diff
@@ -32,12 +32,13 @@ def has_insights(self) -> bool:
     def from_tavily_response(
         market_id: HexAddress,
         created_at: datetime,
+        summary: str | None,
         tavily_response: t.Union[TavilyResponse, None],
     ) -> "MarketInsightsResponse":
         return MarketInsightsResponse(
             market_id=market_id,
             created_at=created_at,
-            summary=tavily_response.answer if tavily_response else None,
+            summary=summary,
             results=(
                 [
                     MarketInsightResult.from_tavily_result(result)
```
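from_tavily_response previously derived the summary from tavily_response.answer; it now receives the summary explicitly, so the LLM-written text and the raw search results are decoupled and either can be absent independently. A construction sketch with placeholder values:

```python
from datetime import datetime, timezone

from labs_api.models import MarketInsightsResponse

response = MarketInsightsResponse.from_tavily_response(
    market_id="0x0000000000000000000000000000000000000000",  # placeholder market id
    created_at=datetime.now(timezone.utc),
    summary="LLM-written context for the question.",
    tavily_response=None,  # summary no longer depends on this being present
)
```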
poetry.lock (122 additions, 1 deletion)

Some generated files are not rendered by default; the lockfile diff is not shown.

pyproject.toml (4 additions, 1 deletion)

```diff
@@ -7,12 +7,15 @@ readme = "README.md"

 [tool.poetry.dependencies]
 python = "~3.10.0"
-prediction-market-agent-tooling = { version = "^0.48.11" }
+prediction-market-agent-tooling = { version = "^0.48.11", extras = ["openai", "langchain"] }
 pydantic-settings = "^2.1.0"
 tavily-python = "^0.3.1"
 fastapi = "^0.111.0"
 sqlmodel = "^0.0.21"
 psycopg2-binary = "^2.9.9"
+langchain = "^0.2.16"
+langchain-openai = "^0.1.23"
+langchain-community = "^0.2.16"

 [tool.poetry.group.dev.dependencies]
 pytest = "^8.2.1"
```
