Commit: Improve summary (#10)

kongzii authored Sep 10, 2024
1 parent aff9952 commit 9b2eaa7

Showing 6 changed files with 413 additions and 228 deletions.
19 changes: 2 additions & 17 deletions labs_api/config.py
```diff
@@ -1,23 +1,8 @@
 import typing as t
+from prediction_market_agent_tooling.config import APIKeys
 
-from prediction_market_agent_tooling.tools.utils import check_not_none
-from pydantic import SecretStr
-from pydantic_settings import BaseSettings, SettingsConfigDict
-
-
-class Config(BaseSettings):
-    model_config = SettingsConfigDict(
-        env_file=".env", env_file_encoding="utf-8", extra="ignore"
-    )
-
+class Config(APIKeys):
     HOST: str = "0.0.0.0"
     PORT: int = 8080
     WORKERS: int = 1
     RELOAD: bool = True
-    SQLALCHEMY_DB_URL: t.Optional[SecretStr] = None
-
-    @property
-    def sqlalchemy_db_url(self) -> SecretStr:
-        return check_not_none(
-            self.SQLALCHEMY_DB_URL, "SQLALCHEMY_DB_URL missing in the environment."
-        )
```
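
With this change, Config inherits its settings behavior from the tooling's APIKeys class instead of declaring its own pydantic-settings base; the deleted SQLALCHEMY_DB_URL field and sqlalchemy_db_url property suggest the parent class already provides them. A minimal usage sketch under those assumptions (not part of the commit):

```python
# Minimal sketch, assuming APIKeys is a pydantic-settings model that loads
# values from the environment/.env and that it already exposes
# SQLALCHEMY_DB_URL plus a sqlalchemy_db_url accessor (implied by the
# deletions above, but not shown in this diff).
from labs_api.config import Config

config = Config()
print(config.HOST, config.PORT)    # "0.0.0.0" 8080 unless overridden via env
db_url = config.sqlalchemy_db_url  # assumed to be inherited from APIKeys
```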
70 changes: 69 additions & 1 deletion labs_api/insights.py
```diff
@@ -1,19 +1,36 @@
 import fastapi
+from langchain.prompts import ChatPromptTemplate
+from langchain_openai import ChatOpenAI
-from loguru import logger
+from prediction_market_agent_tooling.config import APIKeys
+from prediction_market_agent_tooling.loggers import logger
 from prediction_market_agent_tooling.markets.omen.omen_subgraph_handler import (
     HexAddress,
+    OmenMarket,
     OmenSubgraphHandler,
 )
+from prediction_market_agent_tooling.tools.langfuse_ import (
+    get_langfuse_langchain_config,
+    observe,
+)
 from prediction_market_agent_tooling.tools.tavily_storage.tavily_models import (
     TavilyResponse,
-    TavilyStorage,
 )
 from prediction_market_agent_tooling.tools.tavily_storage.tavily_storage import (
+    TavilyStorage,
     tavily_search,
 )
-from prediction_market_agent_tooling.tools.utils import utcnow
+from prediction_market_agent_tooling.tools.utils import (
+    LLM_SUPER_LOW_TEMPERATURE,
+    utcnow,
+)
 
 from labs_api.insights_cache import MarketInsightsResponseCache
 from labs_api.models import MarketInsightsResponse
 
 
+# Don't observe the cached version, as it will always return the same thing that's already been observed.
 def market_insights_cached(
     market_id: HexAddress, cache: MarketInsightsResponseCache
 ) -> MarketInsightsResponse:
@@ -28,6 +45,7 @@ def market_insights_cached(
     return new
 
 
+@observe()
 def market_insights(market_id: HexAddress) -> MarketInsightsResponse:
     """Returns market insights for a given market on Omen."""
     try:
@@ -47,8 +65,58 @@ def market_insights(market_id: HexAddress) -> MarketInsightsResponse:
     except Exception as e:
         logger.error(f"Failed to get tavily_response for market `{market_id}`: {e}")
         tavily_response = None
+    try:
+        summary = (
+            tavily_response_to_summary(market, tavily_response)
+            if tavily_response is not None
+            else None
+        )
+    except Exception as e:
+        logger.warning(
+            f"Failed to generate short description for market `{market_id}`: {e}"
+        )
+        summary = None
     return MarketInsightsResponse.from_tavily_response(
         market_id=market_id,
         created_at=utcnow(),
+        summary=summary,
         tavily_response=tavily_response,
     )
+
+
+@observe()
+def tavily_response_to_summary(
+    market: OmenMarket, tavily_response: TavilyResponse
+) -> str:
+    contents = [result.content for result in tavily_response.results]
+
+    llm = ChatOpenAI(
+        model="gpt-4o-2024-08-06",
+        temperature=LLM_SUPER_LOW_TEMPERATURE,
+        api_key=APIKeys().openai_api_key_secretstr_v1,
+    )
+
+    prompt = ChatPromptTemplate.from_template(
+        template="""Based on the information provided, write a very brief, tweet-like summary about the current situation relevant for the prediction market question.
+In the summary:
+- you should include the most important information that you think is relevant for the prediction market question
+- never answer the question, only provide the context for the reader
+- don't include any hashtags or links
+- always end up telling the user to do their own research to make their own decision, but in a more polite manner guiding them to do so before placing any bets in the prediction market
+Prediction Market question: {question}
+Information: {information}
+"""
+    )
+    messages = prompt.format_messages(
+        question=market.question_title, information=contents
+    )
+    completion = str(
+        llm.invoke(
+            messages, max_tokens=1024, config=get_langfuse_langchain_config()
+        ).content
+    )
+
+    return completion
```
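
Taken together, market_insights is now a two-stage pipeline: a Tavily search for the market question, then an LLM pass over the result contents, with each stage failing soft to None instead of raising. A sketch of how a caller might exercise it (illustrative; market_id is a placeholder HexAddress value):

```python
# Illustrative call, not from the commit. `market_id` is a placeholder;
# summary is None whenever the Tavily search or the LLM step failed.
from labs_api.insights import market_insights

insights = market_insights(market_id)  # Tavily search, then LLM summary
if insights.summary is not None:
    print(insights.summary)  # brief, tweet-like context; never an answer
else:
    print("No summary could be generated for this market.")
```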
5 changes: 4 additions & 1 deletion labs_api/main.py
```diff
@@ -3,8 +3,8 @@
 
 import fastapi
 import uvicorn
-from config import Config
 from fastapi.middleware.cors import CORSMiddleware
+from prediction_market_agent_tooling.deploy.agent import initialize_langfuse
 from prediction_market_agent_tooling.gtypes import HexAddress
 from prediction_market_agent_tooling.loggers import logger
 
@@ -30,6 +30,9 @@ async def lifespan(app: fastapi.FastAPI) -> t.AsyncIterator[None]:
     # At end of the service.
     market_insights_cache.engine.dispose()
 
+config = Config()
+initialize_langfuse(config.default_enable_langfuse)
+
 app = fastapi.FastAPI(lifespan=lifespan)
 app.add_middleware(
     CORSMiddleware,
```
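
Since initialize_langfuse now runs at import time, before the FastAPI app object is created, the @observe-decorated functions in insights.py execute with tracing already configured. The diff does not show how the server is launched; Config's HOST, PORT, WORKERS and RELOAD fields suggest an entrypoint roughly like the following sketch:

```python
# Hypothetical entrypoint, not shown in this diff. Note that uvicorn
# ignores `workers` when `reload=True`, so the two are effectively
# either/or in practice.
if __name__ == "__main__":
    uvicorn.run(
        "labs_api.main:app",
        host=config.HOST,
        port=config.PORT,
        workers=config.WORKERS,
        reload=config.RELOAD,
    )
```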
3 changes: 2 additions & 1 deletion labs_api/models.py
```diff
@@ -32,12 +32,13 @@ def has_insights(self) -> bool:
     def from_tavily_response(
         market_id: HexAddress,
         created_at: datetime,
+        summary: str | None,
         tavily_response: t.Union[TavilyResponse, None],
     ) -> "MarketInsightsResponse":
         return MarketInsightsResponse(
             market_id=market_id,
             created_at=created_at,
-            summary=tavily_response.answer if tavily_response else None,
+            summary=summary,
             results=(
                 [
                     MarketInsightResult.from_tavily_result(result)
```
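
The signature change moves responsibility for the summary to the caller: previously from_tavily_response derived it from tavily_response.answer, now insights.py passes in the LLM-generated text, so a response can carry Tavily results alongside summary=None when summarization fails. A call-site sketch mirroring the insights.py hunk above (variable values are placeholders):

```python
# Call-site sketch mirroring insights.py after this commit; market_id,
# summary and tavily_response are placeholders computed earlier.
response = MarketInsightsResponse.from_tavily_response(
    market_id=market_id,
    created_at=utcnow(),
    summary=summary,                  # LLM text, may be None
    tavily_response=tavily_response,  # may also be None
)
assert response.summary == summary    # no fallback to tavily_response.answer
```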