
Commit

fix: linting in multiple files
Gabi Chueh committed Dec 6, 2024
1 parent cb912be commit dd78895
Showing 8 changed files with 76 additions and 37 deletions.
1 change: 0 additions & 1 deletion examples/bot_building_series/bot_building_101.py
@@ -16,7 +16,6 @@
import sys

import pandas as pd

from dfcx_scrapi.core.agents import Agents
from dfcx_scrapi.core.flows import Flows
from dfcx_scrapi.core.intents import Intents
@@ -19,7 +19,6 @@
import sys

import pandas as pd

from dfcx_scrapi.core.agents import Agents
from dfcx_scrapi.tools.dataframe_functions import DataframeFunctions

3 changes: 1 addition & 2 deletions examples/dfcx_agent_cicd/cicd_code/export/export.py
@@ -4,9 +4,8 @@
import logging
import sys

from google.cloud import storage

from dfcx_scrapi.core.agents import Agents
from google.cloud import storage

from .flow_impacted import Impacted

3 changes: 1 addition & 2 deletions examples/dfcx_agent_cicd/cicd_code/shared/deployment.py
@@ -6,12 +6,11 @@
import sys
import time

from google.cloud.dialogflowcx_v3beta1 import types

from dfcx_scrapi.core.agents import Agents
from dfcx_scrapi.core.environments import Environments
from dfcx_scrapi.core.flows import Flows
from dfcx_scrapi.core.versions import Versions
from google.cloud.dialogflowcx_v3beta1 import types

from .en_vs_other_lang import en_vs_lang
from .test_case_run import RunTestCases
@@ -1,6 +1,5 @@
""" Compare the fullfillments of en vs french langauge or etc"""
import pandas as pd

from dfcx_scrapi.core.flows import Flows

from .fullfillment_helper import (
@@ -1,10 +1,9 @@
""" Helper functions for en vs lang"""

from dfcx_scrapi.core.pages import Pages
from google.cloud.dialogflowcx_v3beta1.services import pages
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page

from dfcx_scrapi.core.pages import Pages


class PagesChild(Pages):
"""
3 changes: 1 addition & 2 deletions src/agent_assist/agent_assist.py
@@ -17,14 +17,13 @@
import logging
from typing import Dict

from dfcx_scrapi.core import scrapi_base
from google.cloud.dialogflow_v2beta1 import services, types
from google.cloud.dialogflow_v2beta1.services.knowledge_bases import (
KnowledgeBasesClient,
)
from google.protobuf import field_mask_pb2

from dfcx_scrapi.core import scrapi_base

# logging config
logging.basicConfig(
level=logging.INFO,
98 changes: 72 additions & 26 deletions src/dfcx_scrapi/tools/evaluations.py
@@ -18,11 +18,11 @@
from ast import literal_eval
from dataclasses import dataclass, field
from datetime import datetime
from itertools import zip_longest
from typing import Any, Dict, List

import numpy as np
import pandas as pd
from itertools import zip_longest
from google.cloud.dialogflowcx_v3beta1 import types
from google.oauth2 import service_account
from tqdm import tqdm
@@ -177,7 +177,8 @@ def process_tool_invocations(
row: pd.Series,
df: pd.DataFrame,
) -> pd.DataFrame:
"""Process tool invocations and map them to the correct rows in the dataframe."""
"""Process tool invocations and map them
to the correct rows in the dataframe."""
# Get the list of indices where tool responses should be mapped
if row["tool_pair"] in [None, "", "NaN", "nan"]:
tool_index_list = [index]
@@ -191,7 +192,12 @@
# If we have no tool responses but expected some
if not tool_responses and tool_index_list:
for idx in tool_index_list:
df.loc[int(idx), ["res_tool_name", "res_tool_action", "res_input_params", "res_output_params"]] = [
df.loc[int(idx), [
"res_tool_name",
"res_tool_action",
"res_input_params",
"res_output_params"
]] = [
"NO_TOOL_RESPONSE",
"NO_TOOL_RESPONSE",
"NO_TOOL_RESPONSE",
@@ -203,12 +209,25 @@
for i, idx in enumerate(tool_index_list):
if i < len(tool_responses):
tool = tool_responses[i]
df.loc[int(idx), "res_tool_name"] = tool.get("tool_name", "")
df.loc[int(idx), "res_tool_action"] = tool.get("tool_action", "")
df.loc[int(idx), "res_input_params"] = str(tool.get("input_params", {}))
df.loc[int(idx), "res_output_params"] = str(tool.get("output_params", {}))
df.loc[int(idx), "res_tool_name"] = (
tool.get("tool_name", "")
)
df.loc[int(idx), "res_tool_action"] = (
tool.get("tool_action", "")
)
df.loc[int(idx), "res_input_params"] = (
str(tool.get("input_params", {}))
)
df.loc[int(idx), "res_output_params"] = (
str(tool.get("output_params", {}))
)
else:
df.loc[int(idx), ["res_tool_name", "res_tool_action", "res_input_params", "res_output_params"]] = [
df.loc[int(idx), [
"res_tool_name",
"res_tool_action",
"res_input_params",
"res_output_params"
]] = [
"NO_TOOL_RESPONSE",
"NO_TOOL_RESPONSE",
"NO_TOOL_RESPONSE",
@@ -426,7 +445,8 @@ def run_detect_intent_queries(self, df: pd.DataFrame) -> pd.DataFrame:
]
text_res = self.ar._extract_text(res)

utterance_idx = int(row["utterance_pair"] or index) # if utterance_pair is empty/''/NaN, use index
# if utterance_pair is empty/''/NaN, use index
utterance_idx = int(row["utterance_pair"] or index)
df.loc[utterance_idx, ["agent_response"]] = [text_res]

# Handle Playbook Invocations
@@ -447,9 +467,16 @@ def run_detect_intent_queries(self, df: pd.DataFrame) -> pd.DataFrame:

# Handle Tool Invocations
if "tool_call_quality" in self.user_input_metrics:
tool_responses = self.sessions_client.collect_tool_responses(res)
tool_responses = (
self.sessions_client.collect_tool_responses(res)
)
if tool_responses: # Only call if not empty
df = self.process_tool_invocations(tool_responses, index, row, df)
df = self.process_tool_invocations(
tool_responses,
index,
row,
df
)

return df

@@ -570,23 +597,36 @@ def get_matching_list_idx(a, b):
@staticmethod
def pair_utterances(df: pd.DataFrame) -> pd.DataFrame:
"""
Identifies pairings of user_utterance and agent_utterance by eval_id.
Handles cases where a user utterance has no corresponding agent response.
Identifies pairings of user_utterance and
agent_utterance by eval_id.
Handles cases where a user utterance has no
corresponding agent response.
"""
df["utterance_pair"] = np.nan # Initialize with NaN for missing pairs

grouped = df.groupby("eval_id")

for _, group in grouped:
user_utterances = group[group["action_type"] == "User Utterance"].index.tolist()
agent_responses = group[group["action_type"] == "Agent Response"].index.tolist()
user_utterances = (
group[group["action_type"] == "User Utterance"]
.index
.tolist()
)
agent_responses = (
group[group["action_type"] == "Agent Response"]
.index
.tolist()
)

# Use zip_longest to handle unequal list lengths
for user_idx, agent_idx in zip_longest(user_utterances, agent_responses):
for user_idx, agent_idx in zip_longest(
user_utterances,
agent_responses
):
if agent_idx is not None: # Check if agent response exists
df.loc[user_idx, "utterance_pair"] = str(agent_idx)
else: # Assign NaN if there is no agent_response
df.loc[user_idx, "utterance_pair"] = np.nan # or "NO_AGENT_RESPONSE" if needed
df.loc[user_idx, "utterance_pair"] = np.nan

return df

@@ -616,7 +656,8 @@ def get_model_name(settings: types.GenerativeSettings) -> str:
return model_map.get(model_name, "")

def pair_tool_calls(self, df: pd.DataFrame) -> pd.DataFrame:
"""Associates user utterances with the indices of subsequent tool invocations."""
"""Associates user utterances with the
indices of subsequent tool invocations."""

df["tool_pair"] = "" # Initialize as empty string
grouped = df.groupby("eval_id")
@@ -627,20 +668,25 @@ def pair_tool_calls(self, df: pd.DataFrame) -> pd.DataFrame:

for index, row in group.iterrows():
if row["action_type"] == "User Utterance":
# Assign accumulated tool indices to the *previous* user utterance (if any)
# Assign accumulated tool indices to
# the *previous* user utterance (if any)
if last_user_utterance_index is not None:
df.loc[last_user_utterance_index, "tool_pair"] = str(tool_indices)

df.loc[last_user_utterance_index, "tool_pair"] = (
str(tool_indices)
)
# Reset for the current user utterance:
tool_indices = [] # Clear the list for the current user utterance
last_user_utterance_index = index # Update the user utterance index
tool_indices = []
last_user_utterance_index = index

elif row["action_type"] == "Tool Invocation":
tool_indices.append(index)

# After processing the group, assign any remaining tool indices to the last user utterance
# After processing the group, assign any remaining
# tool indices to the last user utterance
if last_user_utterance_index is not None and tool_indices:
df.loc[last_user_utterance_index, "tool_pair"] = str(tool_indices)
df.loc[last_user_utterance_index, "tool_pair"] = (
str(tool_indices)
)

return df

@@ -854,4 +900,4 @@ def from_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
"""Load eval dataset from local premade dataframe."""
df = self.validate_and_prep_inputs(df)

return df
return df
