Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix current version bug #366

Open
wants to merge 6 commits into
base: release-2024.4.4
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/mavedb/lib/validation/urn_re.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@
# Temporary URN assigned before publication: "tmp:" followed by a UUID-style
# hex string (8-4-4-4-12 groups). Unanchored — callers are expected to use
# fullmatch() to match the entire string.
MAVEDB_TMP_URN_PATTERN = r"tmp:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
MAVEDB_TMP_URN_RE = re.compile(MAVEDB_TMP_URN_PATTERN)

# Old temp URN: legacy temporary format, "tmp:" followed by exactly 16
# alphanumeric characters. NOTE(review): anchored with ^...$ unlike the
# sibling patterns in this module — redundant (but harmless) when matched
# via fullmatch(), which is how score_sets.py uses it.
MAVEDB_OLD_TMP_URN_PATTERN = r"^tmp:[A-Za-z0-9]{16}$"
MAVEDB_OLD_TMP_URN_RE = re.compile(MAVEDB_OLD_TMP_URN_PATTERN)

# Experiment set URN: "urn:<namespace>:" followed by a run of digits whose
# length is MAVEDB_EXPERIMENT_SET_URN_DIGITS (interpolated into the {m}
# repetition count via the f-string's escaped braces).
MAVEDB_EXPERIMENT_SET_URN_PATTERN = rf"urn:{MAVEDB_URN_NAMESPACE}:\d{{{MAVEDB_EXPERIMENT_SET_URN_DIGITS}}}"
MAVEDB_EXPERIMENT_SET_URN_RE = re.compile(MAVEDB_EXPERIMENT_SET_URN_PATTERN)
Expand Down
152 changes: 83 additions & 69 deletions src/mavedb/routers/score_sets.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
search_score_sets as _search_score_sets,
refresh_variant_urns,
)
from mavedb.lib.validation import urn_re
from mavedb.lib.taxonomies import find_or_create_taxonomy
from mavedb.lib.urns import (
generate_experiment_set_urn,
Expand All @@ -64,7 +65,7 @@


async def fetch_score_set_by_urn(
db, urn: str, user: Optional[UserData], owner_or_contributor: Optional[UserData], only_published: bool
db, urn: str, user: Optional[UserData], owner_or_contributor: Optional[UserData], only_published: bool
) -> Optional[ScoreSet]:
"""
Fetch one score set by URN, ensuring that the user has read permission.
Expand Down Expand Up @@ -103,6 +104,17 @@ async def fetch_score_set_by_urn(
raise HTTPException(status_code=404, detail=f"score set with URN '{urn}' not found")

assert_permission(user, item, Action.READ)
if(
item
and item.superseding_score_set
and not owner_or_contributor
and (
urn_re.MAVEDB_OLD_TMP_URN_RE.fullmatch(item.superseding_score_set.urn)
or urn_re.MAVEDB_TMP_URN_RE.fullmatch(item.superseding_score_set.urn)
)
):
item.superseding_score_set = None
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What if instead of checking like this we check the permissions of the superseding score set, something like:

Suggested change
if(
item
and item.superseding_score_set
and not owner_or_contributor
and (
urn_re.MAVEDB_OLD_TMP_URN_RE.fullmatch(item.superseding_score_set.urn)
or urn_re.MAVEDB_TMP_URN_RE.fullmatch(item.superseding_score_set.urn)
)
):
item.superseding_score_set = None
from mavedb.lib.permissions import Action, assert_permission, has_permission
if item.superseding_score_set:
superseding_score_set = db.scalars(select(ScoreSet).where(ScoreSet.urn == item.superseding_score_set.urn)).one()
if not has_permission(user, superseding_score_set, Action.READ):
item.superseding_score_set = None

This way, we don't have to maintain any extra permission logic and can guarantee the item within the superseding score set property is only returned if the user has access to it.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I modified it. I check item.superseding_score_set directly because it's an object.


return item


Expand All @@ -128,9 +140,9 @@ def search_score_sets(search: ScoreSetsSearch, db: Session = Depends(deps.get_db
response_model=list[score_set.ShortScoreSet],
)
def search_my_score_sets(
search: ScoreSetsSearch, # = Body(..., embed=True),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
search: ScoreSetsSearch, # = Body(..., embed=True),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
) -> Any:
"""
Search score sets created by the current user..
Expand All @@ -146,10 +158,10 @@ def search_my_score_sets(
response_model_exclude_none=True,
)
async def show_score_set(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(get_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(get_current_user),
) -> Any:
"""
Fetch a single score set by URN.
Expand All @@ -165,17 +177,17 @@ async def show_score_set(
200: {
"content": {"text/csv": {}},
"description": """Variant scores in CSV format, with four fixed columns (accession, hgvs_nt, hgvs_pro,"""
""" and hgvs_splice), plus score columns defined by the score set.""",
""" and hgvs_splice), plus score columns defined by the score set.""",
}
},
)
def get_score_set_scores_csv(
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
) -> Any:
"""
Return scores from a score set, identified by URN, in CSV format.
Expand Down Expand Up @@ -219,17 +231,17 @@ def get_score_set_scores_csv(
200: {
"content": {"text/csv": {}},
"description": """Variant counts in CSV format, with four fixed columns (accession, hgvs_nt, hgvs_pro,"""
""" and hgvs_splice), plus score columns defined by the score set.""",
""" and hgvs_splice), plus score columns defined by the score set.""",
}
},
)
async def get_score_set_counts_csv(
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
*,
urn: str,
start: int = Query(default=None, description="Start index for pagination"),
limit: int = Query(default=None, description="Number of variants to return"),
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
) -> Any:
"""
Return counts from a score set, identified by URN, in CSV format.
Expand Down Expand Up @@ -272,10 +284,10 @@ async def get_score_set_counts_csv(
response_model=list[mapped_variant.MappedVariant],
)
def get_score_set_mapped_variants(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: Optional[UserData] = Depends(get_current_user),
) -> Any:
"""
Return mapped variants from a score set, identified by URN.
Expand All @@ -293,10 +305,10 @@ def get_score_set_mapped_variants(

mapped_variants = (
db.query(MappedVariant)
.filter(ScoreSet.urn == urn)
.filter(ScoreSet.id == Variant.score_set_id)
.filter(Variant.id == MappedVariant.variant_id)
.all()
.filter(ScoreSet.urn == urn)
.filter(ScoreSet.id == Variant.score_set_id)
.filter(Variant.id == MappedVariant.variant_id)
.all()
)

if not mapped_variants:
Expand All @@ -316,10 +328,10 @@ def get_score_set_mapped_variants(
response_model_exclude_none=True,
)
async def create_score_set(
*,
item_create: score_set.ScoreSetCreate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
*,
item_create: score_set.ScoreSetCreate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
) -> Any:
"""
Create a score set.
Expand Down Expand Up @@ -461,9 +473,10 @@ async def create_score_set(
for identifier in item_create.primary_publication_identifiers or []
]
publication_identifiers = [
await find_or_create_publication_identifier(db, identifier.identifier, identifier.db_name)
for identifier in item_create.secondary_publication_identifiers or []
] + primary_publication_identifiers
await find_or_create_publication_identifier(db, identifier.identifier,
identifier.db_name)
for identifier in item_create.secondary_publication_identifiers or []
] + primary_publication_identifiers

# create a temporary `primary` attribute on each of our publications that indicates
# to our association proxy whether it is a primary publication or not
Expand Down Expand Up @@ -603,13 +616,13 @@ async def create_score_set(
response_model_exclude_none=True,
)
async def upload_score_set_variant_data(
*,
urn: str,
counts_file: Optional[UploadFile] = File(None),
scores_file: UploadFile = File(...),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
*,
urn: str,
counts_file: Optional[UploadFile] = File(None),
scores_file: UploadFile = File(...),
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
) -> Any:
"""
Upload scores and variant count files for a score set, and initiate processing these files to
Expand Down Expand Up @@ -660,12 +673,12 @@ async def upload_score_set_variant_data(
"/score-sets/{urn}", response_model=score_set.ScoreSet, responses={422: {}}, response_model_exclude_none=True
)
async def update_score_set(
*,
urn: str,
item_update: score_set.ScoreSetUpdate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
*,
urn: str,
item_update: score_set.ScoreSetUpdate,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user_with_email),
worker: ArqRedis = Depends(deps.get_worker),
) -> Any:
"""
Update a score set.
Expand Down Expand Up @@ -722,9 +735,10 @@ async def update_score_set(
for identifier in item_update.primary_publication_identifiers or []
]
publication_identifiers = [
await find_or_create_publication_identifier(db, identifier.identifier, identifier.db_name)
for identifier in item_update.secondary_publication_identifiers or []
] + primary_publication_identifiers
await find_or_create_publication_identifier(db, identifier.identifier,
identifier.db_name)
for identifier in item_update.secondary_publication_identifiers or []
] + primary_publication_identifiers

# create a temporary `primary` attribute on each of our publications that indicates
# to our association proxy whether it is a primary publication or not
Expand Down Expand Up @@ -863,15 +877,15 @@ async def update_score_set(
if item.variants:
assert item.dataset_columns is not None
score_columns = [
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["score_columns"]
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["score_columns"]
count_columns = [
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["count_columns"]
"hgvs_nt",
"hgvs_splice",
"hgvs_pro",
] + item.dataset_columns["count_columns"]

scores_data = pd.DataFrame(
variants_to_csv_rows(item.variants, columns=score_columns, dtype="score_data")
Expand Down Expand Up @@ -914,10 +928,10 @@ async def update_score_set(

@router.delete("/score-sets/{urn}", responses={422: {}})
async def delete_score_set(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
) -> Any:
"""
Delete a score set.
Expand Down Expand Up @@ -952,10 +966,10 @@ async def delete_score_set(
response_model_exclude_none=True,
)
def publish_score_set(
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
*,
urn: str,
db: Session = Depends(deps.get_db),
user_data: UserData = Depends(require_current_user),
) -> Any:
"""
Publish a score set.
Expand Down
Loading