Commit: add working metric
KaiWaldrant committed Jul 10, 2024
1 parent 3022be8 commit fdb4f26
Showing 3 changed files with 70 additions and 62 deletions.
@@ -8,35 +8,31 @@ __merge__: ../../api/comp_metric.yaml

 # A unique identifier for your component (required).
 # Can contain only lowercase letters or underscores.
-name: my_metric
+name: accuracy
 
 # Metadata for your component
 info:
   metrics:
     # A unique identifier for your metric (required).
     # Can contain only lowercase letters or underscores.
-    - name: my_metric
-      # A relatively short label, used when rendering visualisations (required)
-      label: My Metric
-      # A one sentence summary of how this metric works (required). Used when
-      # rendering summary tables.
-      summary: "FILL IN: A one sentence summary of this metric."
-      # A multi-line description of how this component works (required). Used
-      # when rendering reference documentation.
-      description: |
-        FILL IN: A (multi-line) description of how this metric works.
-      # A reference key from the bibtex library at src/common/library.bib (required).
-      reference: bibtex_reference_key
-      # URL to the documentation for this metric (required).
-      documentation_url: https://url.to/the/documentation
-      # URL to the code repository for this metric (required).
-      repository_url: https://github.com/organisation/repository
-      # The minimum possible value for this metric (required)
-      min: 0
-      # The maximum possible value for this metric (required)
-      max: 1
-      # Whether a higher value represents a 'better' solution (required)
-      maximize: true
+    - name: accuracy
+      # A relatively short label, used when rendering visualisations (required)
+      label: Accuracy
+      # A one sentence summary of how this metric works (required). Used when
+      # rendering summary tables.
+      summary: "The percentage of correctly predicted labels."
+      # A multi-line description of how this component works (required). Used
+      # when rendering reference documentation.
+      description: |
+        The percentage of correctly predicted labels.
+      # A reference key from the bibtex library at src/common/library.bib (required).
+      reference: grandini2020metrics
+      # The minimum possible value for this metric (required)
+      min: 0
+      # The maximum possible value for this metric (required)
+      max: 1
+      # Whether a higher value represents a 'better' solution (required)
+      maximize: true
 
 # Component-specific parameters (optional)
 # arguments:
@@ -57,12 +53,12 @@ resources:
 engines:
   # Specifications for the Docker image for this component.
   - type: docker
-    image: ghcr.io/openproblems-bio/base_python:1.0.4
+    image: ghcr.io/openproblems-bio/base_images/python:1.1.0
     # Add custom dependencies here (optional). For more information, see
     # https://viash.io/reference/config/engines/docker/#setup .
-    # setup:
-    #   - type: python
-    #     packages: scib==1.1.5
+    setup:
+      - type: python
+        packages: scikit-learn
 
 runners:
   # This platform allows running the component natively
47 changes: 47 additions & 0 deletions src/metrics/accuracy/script.py
@@ -0,0 +1,47 @@
import anndata as ad
import numpy as np
import sklearn.preprocessing

## VIASH START
# Note: this section is auto-generated by viash at runtime. To edit it, make changes
# in config.vsh.yaml and then run `viash config inject config.vsh.yaml`.
par = {
'input_solution': 'resources_test/task_template/pancreas/solution.h5ad',
'input_prediction': 'resources_test/task_template/pancreas/prediction.h5ad',
'output': 'output.h5ad'
}
meta = {
'name': 'accuracy'
}
## VIASH END

print('Reading input files', flush=True)
input_solution = ad.read_h5ad(par['input_solution'])
input_prediction = ad.read_h5ad(par['input_prediction'])

assert (input_prediction.obs_names == input_solution.obs_names).all(), "obs_names not the same in prediction and solution inputs"

print("Encode labels", flush=True)
cats = list(input_solution.obs["label"].dtype.categories) + list(input_prediction.obs["label_pred"].dtype.categories)
encoder = sklearn.preprocessing.LabelEncoder().fit(cats)
input_solution.obs["label"] = encoder.transform(input_solution.obs["label"])
input_prediction.obs["label_pred"] = encoder.transform(input_prediction.obs["label_pred"])


print('Compute metrics', flush=True)
# metric_ids and metric_values can have length > 1
# but should be of equal length
uns_metric_ids = [ 'accuracy' ]
uns_metric_values = [ np.mean(input_solution.obs["label"] == input_prediction.obs["label_pred"]) ]

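# Only .uns is populated: the metric component emits its scores together with
# the identifiers copied over from the prediction input, with no matrices.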
print("Write output AnnData to file", flush=True)
output = ad.AnnData(
uns={
'dataset_id': input_prediction.uns['dataset_id'],
'normalization_id': input_prediction.uns['normalization_id'],
'method_id': input_prediction.uns['method_id'],
'metric_ids': uns_metric_ids,
'metric_values': uns_metric_values
}
)
output.write_h5ad(par['output'], compression='gzip')
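
Side note (not part of this commit): the core computation above is the plain fraction of matching labels, which is equivalent to scikit-learn's accuracy_score. A minimal sketch with hypothetical toy arrays standing in for the encoded obs columns:

import numpy as np
import sklearn.metrics

# Hypothetical toy labels, already integer-encoded as in the script above.
solution = np.array([0, 1, 2, 2, 1])
prediction = np.array([0, 1, 2, 1, 1])

# Both expressions compute the fraction of correctly predicted labels: 0.8.
assert np.mean(solution == prediction) == sklearn.metrics.accuracy_score(solution, prediction)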
35 changes: 0 additions & 35 deletions src/metrics/my_metric/script.py

This file was deleted.
