Bug fix (#321)
* Change score method for MLSuperb

* Small fix
remg1997 authored Jan 22, 2025
1 parent 1cc6b7b commit a7d469a
Showing 1 changed file with 25 additions and 9 deletions.
34 changes: 25 additions & 9 deletions backend/app/domain/services/base/score.py
@@ -11,7 +11,9 @@
 
 from app.domain.helpers.email import EmailHelper
 from app.domain.services.base.dataset import DatasetService
+from app.infrastructure.repositories.dataset import DatasetRepository
 from app.infrastructure.repositories.model import ModelRepository
+from app.infrastructure.repositories.round import RoundRepository
 from app.infrastructure.repositories.score import ScoreRepository
 from app.infrastructure.repositories.task import TaskRepository
 from app.infrastructure.repositories.user import UserRepository
@@ -21,7 +23,9 @@ class ScoreService:
     def __init__(self):
         self.score_repository = ScoreRepository()
         self.task_repository = TaskRepository()
+        self.dataset_repository = DatasetRepository()
         self.dataset_service = DatasetService()
+        self.round_repository = RoundRepository()
         self.model_repository = ModelRepository()
         self.user_repository = UserRepository()
         self.session = boto3.Session(
@@ -30,6 +34,7 @@ def __init__(self):
             region_name=os.getenv("AWS_REGION"),
         )
         self.s3 = self.session.client("s3")
+        self.email_sender = os.getenv("MAIL_LOGIN")
         self.email_helper = EmailHelper()
 
     def get_scores_by_dataset_and_model_id(
@@ -368,30 +373,41 @@ def add_scores_and_update_model(
         try:
             model = self.model_repository.get_model_info_by_id(model_id)
             user = self.user_repository.get_info_by_user_id(model["uid"])
 
             if status_code != 200:
                 self.email_helper.send(
-                    contact=user["email"],
+                    contact=user.email,
+                    cc_contact=self.email_sender,
                     template_name="model_inference_failed.txt",
                     msg_dict={"name": model["name"], "message": message},
                     subject=f"Model {model['name']} evaluation failed.",
                 )
                 print("error running inference")
                 print(message)
                 return {"response": "Error running instance"}
             else:
                 datasets = self.dataset_repository.get_order_datasets_by_task_id(
-                    model.task_id
+                    model["tid"]
                 )
                 datasets = [dataset.__dict__ for dataset in datasets]
+                round_id = datasets[0]["rid"]
+                round_info = self.round_repository.get_round_info_by_round_and_task(
+                    model["tid"], round_id
+                )
                 metadata_json = dict(scores)
-                scores["metadata_json"] = metadata_json
-                scores["mid"] = model_id
-                scores["did"] = datasets[0]["id"]
-                self.score_repository.add(scores)
+
+                new_score = {
+                    "perf": metadata_json["Standard_CER_15_WORSE"],
+                    "pretty_perf": f"{100*metadata_json['Standard_CER_15_WORSE']:.2f}%",
+                    "mid": model_id,
+                    "r_realid": round_info.id,
+                    "did": datasets[0]["id"],
+                    "metadata_json": json.dumps(metadata_json),
+                }
+
+                self.score_repository.add(new_score)
+
                 self.model_repository.update_model_status(model_id)
                 self.email_helper.send(
-                    contact=user["email"],
+                    contact=user.email,
+                    cc_contact=self.email_sender,
                     template_name="model_evaluation_sucessful.txt",
                     msg_dict={"name": model["name"], "model_id": model["id"]},
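For context, a minimal sketch of the scoring flow this commit introduces: the raw metrics dict is snapshotted, and a leaderboard record is built around the Standard_CER_15_WORSE metric, with a percentage-formatted pretty_perf string and JSON-serialized metadata. The sample metric values below are hypothetical, and the repository-backed fields (mid, r_realid, did) are omitted since they come from database lookups; only the keys and the formatting come from the diff.

import json

# Hypothetical evaluation output; in the diff this arrives as the `scores`
# argument of add_scores_and_update_model.
scores = {"Standard_CER_15_WORSE": 0.2371, "Standard_WER": 0.4102}

metadata_json = dict(scores)  # snapshot the metrics before building the record

new_score = {
    "perf": metadata_json["Standard_CER_15_WORSE"],  # raw metric value
    # Rendered as a percentage string for display, e.g. "23.71%"
    "pretty_perf": f"{100*metadata_json['Standard_CER_15_WORSE']:.2f}%",
    "metadata_json": json.dumps(metadata_json),  # persisted as a JSON string
}

print(new_score["pretty_perf"])  # -> 23.71%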
