Fix uniprot limit of 25 items per page.
Fixing #244

Also a big cleanup (black formatting) and a refactoring of the REST parent class into an attribute on each service class.
cokelaer committed Dec 28, 2022
1 parent 297a57c commit cce05ab
Showing 66 changed files with 1,524 additions and 1,961 deletions.
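
For context on the 25-item limit (#244): the current UniProt REST API returns 25 results per page by default and exposes further pages through a cursor carried in the Link response header. The sketch below shows one standalone way to collect every page by following that header; it is not the bioservices implementation, and the endpoint, parameters and example query are assumptions about the current UniProt service rather than anything taken from this commit::

    import requests

    def fetch_all_uniprot(query, page_size=500):
        # Collect every result of a UniProt search, not only the first page.
        # Assumes the rest.uniprot.org search endpoint and its cursor-based
        # pagination exposed through the "Link" response header.
        url = "https://rest.uniprot.org/uniprotkb/search"
        params = {"query": query, "format": "json", "size": page_size}
        entries = []
        while url:
            resp = requests.get(url, params=params)
            resp.raise_for_status()
            entries.extend(resp.json().get("results", []))
            # requests parses the Link header into resp.links; the "next" URL
            # already embeds the cursor and size, so later requests need no params.
            url = resp.links.get("next", {}).get("url")
            params = None
        return entries

    # Usage (illustrative query):
    # hits = fetch_all_uniprot("gene:ZAP70 AND organism_id:9606 AND reviewed:true")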
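
A note on the refactoring mentioned above: instead of subclassing REST, each service class now keeps a REST instance in a services attribute and routes calls such as http_get and logging through it (see the BioModels and DBFetch hunks below). The condensed sketch below illustrates the before/after pattern; the class names and the base URL are illustrative rather than copied verbatim from the package::

    from bioservices.services import REST

    # Before: the service inherited http_get, logging, etc. from REST
    class BioModelsOld(REST):
        _url = "https://www.ebi.ac.uk/biomodels"  # assumed base URL

        def __init__(self, verbose=True):
            super().__init__(name="BioModels", url=self._url, verbose=verbose)

        def get_model(self, model_id, frmt="json"):
            return self.http_get(model_id, frmt=frmt, params={"format": frmt})

    # After: the service owns a REST instance and delegates to it
    class BioModelsNew:
        _url = "https://www.ebi.ac.uk/biomodels"  # assumed base URL

        def __init__(self, verbose=True):
            self.services = REST(name="BioModels", url=self._url, verbose=verbose)

        def get_model(self, model_id, frmt="json"):
            return self.services.http_get(model_id, frmt=frmt, params={"format": frmt})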
41 changes: 0 additions & 41 deletions .github/workflows/rnaseq_ebi.yml

This file was deleted.

5 changes: 5 additions & 0 deletions README.rst
@@ -191,6 +191,11 @@ Changelog
========= ====================================================================
Version Description
========= ====================================================================
1.11.0 * Fix uniprot limitation of 25 results only (
* For developers: all services are now refactorised to use services
as an attribute rather than a parent class.
* Remove ReactomeOld and ReactomeAnalysis (deprecated)
* move rnaseq_ebi (deprecated) to attic for book_keeping
1.10.4 * Fix v1.10.3 adding missing requirements.txt
1.10.3 * Update pdb service to use v2 API
* remove biocarta (website not accesible anymore)
4 changes: 2 additions & 2 deletions setup.py
@@ -4,8 +4,8 @@


_MAJOR = 1
_MINOR = 10
_MICRO = 4
_MINOR = 11
_MICRO = 0
version = '%d.%d.%d' % (_MAJOR, _MINOR, _MICRO)
release = '%d.%d' % (_MAJOR, _MINOR)

3 changes: 0 additions & 3 deletions src/bioservices/__init__.py
@@ -99,9 +99,6 @@
from . import unichem
from .unichem import *

from . import rnaseq_ebi
from .rnaseq_ebi import RNASEQ_EBI

from . import reactome
from .reactome import *

2 changes: 1 addition & 1 deletion src/bioservices/attic/README.txt
@@ -6,4 +6,4 @@ no __init__.py on purpose to prevent tests to be run
- geneprof.py web site does not work anymore
- miriam web site does not work anymore
- biocarta not accesible anymore and could not find alternative website (sept 2022)

- rnaseq_Ebi not accessible anymore
File renamed without changes.
2 changes: 0 additions & 2 deletions src/bioservices/bigg.py
@@ -27,8 +27,6 @@
-- BiGG Models Home Page, March 10, 2020.
"""

import os.path as osp

from bioservices.services import REST
from bioservices.util import sequencify, squash

5 changes: 1 addition & 4 deletions src/bioservices/biodbnet.py
@@ -35,7 +35,6 @@
.. sectionauthor:: Thomas Cokelaer, Feb 2014
"""
import io
from bioservices.services import REST
from bioservices import logger

@@ -90,7 +89,7 @@ def _interpret_output_db(self, input_db, output_db):
# remove spaces so as to compare the input/output databases with the
# list of databases returned by getInputs
outputs = self._list_to_string(output_db)
inputResult = self.getInputs()
#inputResult = self.getInputs()
# getOutputsForInput method
outputResult = self.getOutputsForInput(input_db)
outputResult = [this.lower().replace(" ", "") for this in outputResult]
@@ -259,8 +258,6 @@ def dbReport(self, input_db, input_values, taxon=9606):
return request
inputValues = self._interpret_input_db(inputValues)

# df = pd.readcsv(io.StringIO(res.strip()), sep="\t")

def dbWalk(self, db_path, input_values, taxon=9606):
"""Walk through biological database network
53 changes: 24 additions & 29 deletions src/bioservices/biomodels.py
@@ -32,25 +32,19 @@
"""
import os
import copy
import webbrowser
from functools import wraps
from urllib.request import urlopen

from bioservices import logger
from bioservices.services import REST

logger.name = __name__

from bioservices.services import REST

try:
# python 3
from urllib.request import urlopen
except:
from urllib2 import urlopen

__all__ = ["BioModels"]


class BioModels(REST):
class BioModels:
"""Interface to the `BioModels <http://www.ebi.ac.uk/biomodels>`_ service
::
@@ -91,7 +85,8 @@ def __init__(self, verbose=True):
"""
super(BioModels, self).__init__(name="BioModels", url=BioModels._url, verbose=verbose)
self.services = REST(name="BioModels", url=BioModels._url, verbose=verbose)


def _check_format(self, frmt, supported=["json", "xml", "html"]):
if frmt not in supported:
@@ -111,7 +106,7 @@ def get_all_models(self, chunk=100):
def get_model(self, model_id, frmt="json"):
"""Fetch information about a given model at a particular revision."""
self._check_format(frmt)
res = self.http_get(model_id, frmt=frmt, params={"format": frmt})
res = self.services.http_get(model_id, frmt=frmt, params={"format": frmt})
return res

def get_model_files(self, model_id, frmt="json"):
@@ -121,7 +116,7 @@ def get_model_files(self, model_id, frmt="json"):
:param frmt: format of the output (json, xml)
"""
self._check_format(frmt, ["xml", "json"])
res = self.http_get("model/files/{}".format(model_id), frmt=frmt, params={"format": frmt})
res = self.services.http_get("model/files/{}".format(model_id), frmt=frmt, params={"format": frmt})
return res

def get_model_download(self, model_id, filename=None, output_filename=None):
@@ -166,16 +161,16 @@ def get_model_download(self, model_id, filename=None, output_filename=None):
if filename:
params["filename"] = filename

res = self.http_get("model/download/{}".format(model_id), params=params)
res = self.services.http_get("model/download/{}".format(model_id), params=params)

if filename:
self.logging.info("Saving {}".format(filename))
self.services.logging.info("Saving {}".format(filename))
if output_filename is None:
output_filename = filename
with open(output_filename, "wb") as fout:
fout.write(res.content)
else:
self.logging.info("Saving file {}.zip".format(model_id))
self.services.logging.info("Saving file {}.zip".format(model_id))
if output_filename is None:
output_filename = "{}.zip".format(model_id)
with open(output_filename, "wb") as fout:
@@ -219,7 +214,7 @@ def search(self, query, offset=None, numResults=None, sort=None, frmt="json"):
]
if sort and sort not in sort_options:
raise ValueError("sort must be in {}. You provided {}".format(sort_options, sort))
res = self.http_get("search", params=params)
res = self.services.http_get("search", params=params)
return res

def search_download(self, models, output_filename="models.zip", force=False):
@@ -238,13 +233,13 @@ def search_download(self, models, output_filename="models.zip", force=False):
"""
if isinstance(models, list):
models = ",".join(models)
res = self.http_get("search/download", params={"models": models})
res = self.services.http_get("search/download", params={"models": models})

if res == 404:
self.logging.error("One of your model ID was probably incorrect")
self.services.logging.error("One of your model ID was probably incorrect")
return

self.logging.info(output_filename)
self.services.logging.info(output_filename)
if os.path.exists(output_filename) and force is False:
raise IOError(
"{} exists already. Set force to True or change the output_filename argument".format(output_filename)
@@ -287,7 +282,7 @@ def search_parameter(self, query, start=0, size=10, sort=None, frmt="json"):
if sort:
params["sort"] = sort

res = self.http_get("parameterSearch/search", params=params)
res = self.services.http_get("parameterSearch/search", params=params)

return res

@@ -300,9 +295,9 @@ def get_p2m_missing(self, frmt="json"):
"""
self._check_format(frmt)
res = self.http_get("p2m/missing", params={"format": frmt})
res = self.services.http_get("p2m/missing", params={"format": frmt})
res = res["missing"]
self.logging.info("Found {} missing model".format(len(res)))
self.services.logging.info("Found {} missing model".format(len(res)))
return res

def get_p2m_representative(self, model, frmt="json"):
@@ -317,7 +312,7 @@ def get_p2m_representative(self, model, frmt="json"):
"""
self._check_format(frmt)
res = self.http_get("p2m/representative", params={"format": frmt, "model": model})
res = self.services.http_get("p2m/representative", params={"format": frmt, "model": model})
return res

def get_p2m_representatives(self, models, frmt="json"):
@@ -347,7 +342,7 @@ def get_p2m_representatives(self, models, frmt="json"):
models = ",".join([x.strip() for x in models.split(",")])

self._check_format(frmt)
res = self.http_get("p2m/representatives", params={"format": frmt, "modelIds": models})
res = self.services.http_get("p2m/representatives", params={"format": frmt, "modelIds": models})
return res

def get_pdgsmm_missing(self, frmt="json"):
@@ -357,9 +352,9 @@ def get_pdgsmm_missing(self, frmt="json"):
:return: list of model identifiers
"""
self._check_format(frmt)
res = self.http_get("pdgsmm/missing", params={"format": frmt})
res = self.services.http_get("pdgsmm/missing", params={"format": frmt})
res = res["missing"]
self.logging.info("Found {} missing model".format(len(res)))
self.services.logging.info("Found {} missing model".format(len(res)))
return res

def get_pdgsmm_representative(self, model, frmt="json"):
@@ -374,7 +369,7 @@ def get_pdgsmm_representative(self, model, frmt="json"):
"""
self._check_format(frmt)
res = self.http_get("pdgsmm/representative", params={"format": frmt, "model": model})
res = self.services.http_get("pdgsmm/representative", params={"format": frmt, "model": model})
return res

def get_pdgsmm_representatives(self, models, frmt="json"):
@@ -397,5 +392,5 @@ def get_pdgsmm_representatives(self, models, frmt="json"):
models = ",".join([x.strip() for x in models.split(",")])

self._check_format(frmt)
res = self.http_get("pdgsmm/representatives", params={"format": frmt, "modelIds": models})
res = self.services.http_get("pdgsmm/representatives", params={"format": frmt, "modelIds": models})
return res
7 changes: 3 additions & 4 deletions src/bioservices/chembl.py
@@ -355,8 +355,7 @@ class ChEMBL:
_url = "https://www.ebi.ac.uk/chembl/api/data"

def __init__(self, verbose=False, cache=False):
self.services = REST(name="ChEMBL", url=ChEMBL._url, verbose=verbose, cache=cache,
url_defined_later=True)
self.services = REST(name="ChEMBL", url=ChEMBL._url, verbose=verbose, cache=cache, url_defined_later=True)
self.format = "json"

def _get_data(self, name, params):
@@ -403,7 +402,7 @@ def _get_data(self, name, params):

count = 1

with tqdm(total=N, desc='TEST') as pb:
with tqdm(total=N, desc="TEST") as pb:
while res["page_meta"]["next"] and len(data) < max_data:
params["limit"] = limit
params["offset"] = limit * count + offset
@@ -881,7 +880,7 @@ def get_xref_source(self, query=None, limit=20, offset=0, filters=None):
return self._get_this_service("xref_source", query, params=params)

def get_image(self, query, dimensions=500, format="png", save=True, view=True, engine="indigo"):
"""Get the image of a given compound in PNG png format.
r"""Get the image of a given compound in PNG png format.
:param str query: a valid compound ChEMBLId or a list/tuple
of valid compound ChEMBLIds.
15 changes: 8 additions & 7 deletions src/bioservices/dbfetch.py
@@ -38,7 +38,7 @@
__all__ = ["DBFetch"]


class DBFetch(REST):
class DBFetch:
"""Interface to `DBFetch <http://www.ebi.ac.uk/Tools/webservices/services/dbfetch_rest>`_ service
::
@@ -59,7 +59,8 @@ def __init__(self, verbose=False):
:param bool verbose: print informative messages
"""
super(DBFetch, self).__init__(name="DBfetch", url=DBFetch._url, verbose=verbose)

self.services = REST(name="DBfetch", url=DBFetch._url, verbose=verbose)
self._supportedDBs = None
self._supportedFormats = None
self._supportedStyles = None
@@ -93,7 +94,7 @@ def fetch(self, query, db="ena_sequence", format="default", style="raw", pageHtm
"""
self._check_db(db)
res = self.http_get(
res = self.services.http_get(
"dbfetch",
params={
"db": db,
Expand Down Expand Up @@ -122,7 +123,7 @@ def get_database_info(self, db=None):
'The UniProt Knowledgebase (UniProtKB) is the central access point for extensive curated protein information, including function, classification, and cross-references. Search UniProtKB to retrieve everything that is known about a particular sequence.'
"""
res = self.http_get("dbfetch/dbfetch.databases?style=json")
res = self.services.http_get("dbfetch/dbfetch.databases?style=json")
if db:
self._check_db(db)
res = res[db]
Expand Down Expand Up @@ -158,7 +159,7 @@ def get_database_formats(self, db):
"""
self._check_db(db)
res = self.http_get("dbfetch?info=formats&db={}".format(db)).content
res = self.services.http_get("dbfetch?info=formats&db={}".format(db)).content
res = res.decode().split()
return res

Expand All @@ -177,7 +178,7 @@ def get_database_format_styles(self, db, format):
"""
self._check_db(db)
res = self.http_get("dbfetch?info=styles&format={}&db={}".format(format, db)).content
res = self.services.http_get("dbfetch?info=styles&format={}&db={}".format(format, db)).content
res = res.decode().split()
return res

Expand All @@ -189,7 +190,7 @@ def _getSupportedDBs(self):
if self._supportedDBs:
return self._supportedDBs
else:
res = self.http_get("dbfetch?info=dbs").content
res = self.services.http_get("dbfetch?info=dbs").content
self._supportedDBs = res.decode().split() + ["default"]
return self._supportedDBs

2 changes: 1 addition & 1 deletion src/bioservices/ensembl.py
@@ -1636,7 +1636,7 @@ def post_vep_by_id(self, species, identifiers):
raise NotImplementedError
# POST vep/:species/id/ Fetch variant consequences for multiple ids
self._check_frmt(frmt, ["xml"])
res = self.services.http_get(
_ = self.services.http_get(
"variation/{0}/{1}".format(species, identifier),
frmt=frmt,
headers=self.services.get_headers(content=frmt),
(remaining changed files not shown)
