Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix current version bug #366

Open
wants to merge 5 commits into
base: release-2024.4.4
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/mavedb/lib/validation/urn_re.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@
MAVEDB_TMP_URN_PATTERN = r"tmp:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
MAVEDB_TMP_URN_RE = re.compile(MAVEDB_TMP_URN_PATTERN)

# Legacy temporary URN format: the literal prefix "tmp:" followed by exactly 16
# alphanumeric characters (e.g. "tmp:aB3dE5fG7hI9jK1L").
# NOTE(review): unlike MAVEDB_TMP_URN_PATTERN above (a UUID-style tmp URN with no
# anchors), this pattern is anchored with ^/$. Confirm callers use re.match/search
# consistently — with re.fullmatch the anchors are redundant, and the anchoring
# difference between the two tmp patterns may be unintentional.
MAVEDB_OLD_TMP_URN_PATTERN = r"^tmp:[A-Za-z0-9]{16}$"
MAVEDB_OLD_TMP_URN_RE = re.compile(MAVEDB_OLD_TMP_URN_PATTERN)

# Experiment set URN
MAVEDB_EXPERIMENT_SET_URN_PATTERN = rf"urn:{MAVEDB_URN_NAMESPACE}:\d{{{MAVEDB_EXPERIMENT_SET_URN_DIGITS}}}"
MAVEDB_EXPERIMENT_SET_URN_RE = re.compile(MAVEDB_EXPERIMENT_SET_URN_PATTERN)
Expand Down
146 changes: 76 additions & 70 deletions src/mavedb/routers/score_sets.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
logging_context,
save_to_logging_context,
)
from mavedb.lib.permissions import Action, assert_permission
from mavedb.lib.permissions import Action, assert_permission, has_permission
from mavedb.lib.score_sets import (
csv_data_to_df,
find_meta_analyses_for_experiment_sets,
Expand Down Expand Up @@ -64,7 +64,7 @@


async def fetch_score_set_by_urn(
db, urn: str, user: Optional[UserData], owner_or_contributor: Optional[UserData], only_published: bool
db, urn: str, user: Optional[UserData], owner_or_contributor: Optional[UserData], only_published: bool
) -> Optional[ScoreSet]:
"""
Fetch one score set by URN, ensuring that the user has read permission.
Expand Down Expand Up @@ -103,6 +103,10 @@ async def fetch_score_set_by_urn(
raise HTTPException(status_code=404, detail=f"score set with URN '{urn}' not found")

assert_permission(user, item, Action.READ)

if item.superseding_score_set and not has_permission(user, item.superseding_score_set, Action.READ).permitted:
item.superseding_score_set = None

return item


Expand All @@ -128,9 +132,9 @@ def search_score_sets(search: ScoreSetsSearch, db: Session = Depends(deps.get_db
response_model=list[score_set.ShortScoreSet],
)
def search_my_score_sets(
search: ScoreSetsSearch, # = Body(..., embed=True),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
search: ScoreSetsSearch, # = Body(..., embed=True),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
) -> Any:
"""
Search score sets created by the current user..
Expand All @@ -146,10 +150,10 @@ def search_my_score_sets(
response_model_exclude_none=True,
)
async def show_score_set(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(get_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(get_current_user),
) -> Any:
"""
Fetch a single score set by URN.
Expand All @@ -165,17 +169,17 @@ async def show_score_set(
200: {
"content": {"text/csv": {}},
"description": """Variant scores in CSV format, with four fixed columns (accession, hgvs_nt, hgvs_pro,"""
""" and hgvs_splice), plus score columns defined by the score set.""",
""" and hgvs_splice), plus score columns defined by the score set.""",
}
},
)
def get_score_set_scores_csv(
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
) -> Any:
"""
Return scores from a score set, identified by URN, in CSV format.
Expand Down Expand Up @@ -219,17 +223,17 @@ def get_score_set_scores_csv(
200: {
"content": {"text/csv": {}},
"description": """Variant counts in CSV format, with four fixed columns (accession, hgvs_nt, hgvs_pro,"""
""" and hgvs_splice), plus score columns defined by the score set.""",
""" and hgvs_splice), plus score columns defined by the score set.""",
}
},
)
async def get_score_set_counts_csv(
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
) -> Any:
"""
Return counts from a score set, identified by URN, in CSV format.
Expand Down Expand Up @@ -272,10 +276,10 @@ async def get_score_set_counts_csv(
response_model=list[mapped_variant.MappedVariant],
)
def get_score_set_mapped_variants(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
) -> Any:
"""
Return mapped variants from a score set, identified by URN.
Expand All @@ -293,10 +297,10 @@ def get_score_set_mapped_variants(

mapped_variants = (
db.query(MappedVariant)
.filter(ScoreSet.urn == urn)
.filter(ScoreSet.id == Variant.score_set_id)
.filter(Variant.id == MappedVariant.variant_id)
.all()
.filter(ScoreSet.urn == urn)
.filter(ScoreSet.id == Variant.score_set_id)
.filter(Variant.id == MappedVariant.variant_id)
.all()
)

if not mapped_variants:
Expand All @@ -316,10 +320,10 @@ def get_score_set_mapped_variants(
response_model_exclude_none=True,
)
async def create_score_set(
*,
item_create: score_set.ScoreSetCreate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
*,
item_create: score_set.ScoreSetCreate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
) -> Any:
"""
Create a score set.
Expand Down Expand Up @@ -461,9 +465,10 @@ async def create_score_set(
for identifier in item_create.primary_publication_identifiers or []
]
publication_identifiers = [
await find_or_create_publication_identifier(db, identifier.identifier, identifier.db_name)
for identifier in item_create.secondary_publication_identifiers or []
] + primary_publication_identifiers
await find_or_create_publication_identifier(db, identifier.identifier,
identifier.db_name)
for identifier in item_create.secondary_publication_identifiers or []
] + primary_publication_identifiers

# create a temporary `primary` attribute on each of our publications that indicates
# to our association proxy whether it is a primary publication or not
Expand Down Expand Up @@ -603,13 +608,13 @@ async def create_score_set(
response_model_exclude_none=True,
)
async def upload_score_set_variant_data(
*,
urn: str,
counts_file: Optional[UploadFile] = File(None),
scores_file: UploadFile = File(...),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
*,
urn: str,
counts_file: Optional[UploadFile] = File(None),
scores_file: UploadFile = File(...),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
) -> Any:
"""
Upload scores and variant count files for a score set, and initiate processing these files to
Expand Down Expand Up @@ -660,12 +665,12 @@ async def upload_score_set_variant_data(
"/score-sets/{urn}", response_model=score_set.ScoreSet, responses={422: {}}, response_model_exclude_none=True
)
async def update_score_set(
*,
urn: str,
item_update: score_set.ScoreSetUpdate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
*,
urn: str,
item_update: score_set.ScoreSetUpdate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
) -> Any:
"""
Update a score set.
Expand Down Expand Up @@ -722,9 +727,10 @@ async def update_score_set(
for identifier in item_update.primary_publication_identifiers or []
]
publication_identifiers = [
await find_or_create_publication_identifier(db, identifier.identifier, identifier.db_name)
for identifier in item_update.secondary_publication_identifiers or []
] + primary_publication_identifiers
await find_or_create_publication_identifier(db, identifier.identifier,
identifier.db_name)
for identifier in item_update.secondary_publication_identifiers or []
] + primary_publication_identifiers

# create a temporary `primary` attribute on each of our publications that indicates
# to our association proxy whether it is a primary publication or not
Expand Down Expand Up @@ -863,15 +869,15 @@ async def update_score_set(
if item.variants:
assert item.dataset_columns is not None
score_columns = [
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["score_columns"]
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["score_columns"]
count_columns = [
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["count_columns"]
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["count_columns"]

scores_data = pd.DataFrame(
variants_to_csv_rows(item.variants, columns=score_columns, dtype="score_data")
Expand Down Expand Up @@ -914,10 +920,10 @@ async def update_score_set(

@router.delete("/score-sets/{urn}", responses={422: {}})
async def delete_score_set(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
) -> Any:
"""
Delete a score set.
Expand Down Expand Up @@ -952,10 +958,10 @@ async def delete_score_set(
response_model_exclude_none=True,
)
def publish_score_set(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
) -> Any:
"""
Publish a score set.
Expand Down
90 changes: 90 additions & 0 deletions tests/routers/test_score_set.py
Original file line number Diff line number Diff line change
Expand Up @@ -1557,3 +1557,93 @@ def test_can_modify_metadata_for_score_set_with_inactive_license(session, client
assert response.status_code == 200
response_data = response.json()
assert ("title", response_data["title"]) == ("title", "Update title")

########################################################################################################################
# Supersede score set
########################################################################################################################

def test_create_superseding_score_set(session, data_provider, client, setup_router_db, data_files):
    """Creating a score set that supersedes one's own published score set succeeds."""
    experiment = create_experiment(client)
    original_score_set = create_seq_score_set_with_variants(
        client, session, data_provider, experiment["urn"], data_files / "scores.csv"
    )

    # Publish the original score set so it can be superseded.
    publish_response = client.post(f"/api/v1/score-sets/{original_score_set['urn']}/publish")
    assert publish_response.status_code == 200
    published = publish_response.json()

    # Build a new score set payload that points back at the published one.
    payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET)
    payload["experimentUrn"] = published["experiment"]["urn"]
    payload["supersededScoreSetUrn"] = published["urn"]

    create_response = client.post("/api/v1/score-sets/", json=payload)
    assert create_response.status_code == 200

def test_can_view_unpublished_superseding_score_set(session, data_provider, client, setup_router_db, data_files):
    """The owner of an unpublished superseding score set sees it on the superseded score set."""
    experiment = create_experiment(client)
    original_score_set = create_seq_score_set_with_variants(
        client, session, data_provider, experiment["urn"], data_files / "scores.csv"
    )

    publish_response = client.post(f"/api/v1/score-sets/{original_score_set['urn']}/publish")
    assert publish_response.status_code == 200
    published = publish_response.json()

    # Create an (unpublished) superseding score set owned by the same user.
    payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET)
    payload["experimentUrn"] = published["experiment"]["urn"]
    payload["supersededScoreSetUrn"] = published["urn"]
    create_response = client.post("/api/v1/score-sets/", json=payload)
    assert create_response.status_code == 200
    superseding = create_response.json()

    # Fetching the published score set exposes the unpublished superseding one to its owner.
    fetch_response = client.get(f"/api/v1/score-sets/{published['urn']}")
    assert fetch_response.status_code == 200
    fetched = fetch_response.json()
    assert fetched["urn"] == superseding["supersededScoreSet"]["urn"]
    assert fetched["supersedingScoreSet"]["urn"] == superseding["urn"]

def test_cannot_view_others_unpublished_superseding_score_set(session, data_provider, client, setup_router_db, data_files):
    """An unpublished superseding score set owned by another user is hidden from the response."""
    experiment = create_experiment(client)
    original_score_set = create_seq_score_set_with_variants(
        client, session, data_provider, experiment["urn"], data_files / "scores.csv"
    )

    publish_response = client.post(f"/api/v1/score-sets/{original_score_set['urn']}/publish")
    assert publish_response.status_code == 200
    published = publish_response.json()

    payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET)
    payload["experimentUrn"] = published["experiment"]["urn"]
    payload["supersededScoreSetUrn"] = published["urn"]
    create_response = client.post("/api/v1/score-sets/", json=payload)
    assert create_response.status_code == 200
    superseding = create_response.json()

    # Hand the unpublished superseding score set to a different user.
    change_ownership(session, superseding["urn"], ScoreSetDbModel)

    fetch_response = client.get(f"/api/v1/score-sets/{published['urn']}")
    assert fetch_response.status_code == 200
    fetched = fetch_response.json()
    assert fetched["urn"] == superseding["supersededScoreSet"]["urn"]
    # Other users can't view the unpublished superseding score set.
    assert "supersedingScoreSet" not in fetched

def test_can_view_others_published_superseding_score_set(session, data_provider, client, setup_router_db, data_files):
    """A published superseding score set remains visible even when owned by another user."""
    experiment = create_experiment(client)
    original_score_set = create_seq_score_set_with_variants(
        client, session, data_provider, experiment["urn"], data_files / "scores.csv"
    )

    publish_response = client.post(f"/api/v1/score-sets/{original_score_set['urn']}/publish")
    assert publish_response.status_code == 200
    published = publish_response.json()

    # Create and publish a superseding score set against the published one.
    superseding = create_seq_score_set_with_variants(
        client,
        session,
        data_provider,
        published["experiment"]["urn"],
        data_files / "scores.csv",
        update={"supersededScoreSetUrn": published["urn"]},
    )
    publish_superseding_response = client.post(f"/api/v1/score-sets/{superseding['urn']}/publish")
    assert publish_superseding_response.status_code == 200
    published_superseding = publish_superseding_response.json()

    # Hand the published superseding score set to a different user.
    change_ownership(session, published_superseding["urn"], ScoreSetDbModel)

    fetch_response = client.get(f"/api/v1/score-sets/{published['urn']}")
    assert fetch_response.status_code == 200
    fetched = fetch_response.json()
    assert fetched["urn"] == published_superseding["supersededScoreSet"]["urn"]
    # Other users can view published superseding score set.
    assert fetched["supersedingScoreSet"]["urn"] == published_superseding["urn"]
Loading