
Commit

Wrap around the session
kledo-lyft committed Dec 13, 2024
1 parent b99eb60 commit 83959b2
Showing 1 changed file with 25 additions and 22 deletions.
47 changes: 25 additions & 22 deletions cartography/intel/cve/feed.py
@@ -85,31 +85,34 @@ def _call_cves_api(url: str, api_key: str | None, params: Dict[str, Any]) -> Dict[Any, Any]:
     )
     results: Dict[Any, Any] = dict()
 
-    while params["resultsPerPage"] > 0 or params["startIndex"] < totalResults:
-        logger.info(f"Calling NIST NVD API at {url} with params {params}")
-        try:
-            with requests.get(
-                url, params=params, headers=headers, timeout=CONNECT_AND_READ_TIMEOUT,
-            ) as res:
+    with requests.Session() as session:
+        while params["resultsPerPage"] > 0 or params["startIndex"] < totalResults:
+            logger.info(f"Calling NIST NVD API at {url} with params {params}")
+            try:
+                res = session.get(
+                    url, params=params, headers=headers, timeout=CONNECT_AND_READ_TIMEOUT,
+                )
                 res.raise_for_status()
                 data = res.json()
-        except requests.exceptions.HTTPError:
-            logger.error(
-                f"Failed to get CVE data from NIST NVD API {res.status_code} : {res.text}",
-            )
-            retries += 1
-            if retries >= MAX_RETRIES:
-                raise
-            # Exponential backoff
-            sleep_time *= 2
-            time.sleep(sleep_time)
-            continue
-        _map_cve_dict(results, data)
-        totalResults = data["totalResults"]
-        params["resultsPerPage"] = data["resultsPerPage"]
-        params["startIndex"] += data["resultsPerPage"]
-        retries = 0
+            except requests.exceptions.HTTPError:
+                logger.error(
+                    f"Failed to get CVE data from NIST NVD API {res.status_code} : {res.text}",
+                )
+                retries += 1
+                if retries >= MAX_RETRIES:
+                    raise
+                # Exponential backoff
+                sleep_time *= 2
+                time.sleep(sleep_time)
+                continue
+
+            _map_cve_dict(results, data)
+            totalResults = data["totalResults"]
+            params["resultsPerPage"] = data["resultsPerPage"]
+            params["startIndex"] += data["resultsPerPage"]
+            retries = 0
+            time.sleep(sleep_time)
 
     return results
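
Why the change matters: `requests.get` opens a fresh TCP (and TLS) connection for every call, while a single `requests.Session` reuses connections from urllib3's pool (HTTP keep-alive) across the whole paginated crawl. A minimal sketch of the pattern — the endpoint is the public NVD CVE API, and the paging values and timeouts are illustrative, not taken from the commit:

```python
import requests

# One Session keeps TCP/TLS connections alive across calls (connection
# pooling) instead of reconnecting for every page of results.
with requests.Session() as session:
    for start_index in (0, 2000, 4000):  # illustrative page offsets
        res = session.get(
            "https://services.nvd.nist.gov/rest/json/cves/2.0",
            params={"startIndex": start_index, "resultsPerPage": 2000},
            timeout=(10, 60),  # (connect, read) seconds; illustrative values
        )
        res.raise_for_status()
        print(start_index, res.json()["totalResults"])
```

Exiting the `with` block also closes the pooled connections once the crawl finishes, which the old per-call `with requests.get(...) as res` form handled per response.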


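The retry logic the commit keeps in place doubles `sleep_time` on each `HTTPError` and re-raises once `MAX_RETRIES` is reached. A self-contained sketch of that backoff shape — `MAX_RETRIES`, the initial `sleep_time`, and the timeout are hypothetical stand-ins here, since the real constants are defined elsewhere in feed.py:

```python
import time

import requests

MAX_RETRIES = 3      # stand-in; the real constant lives elsewhere in feed.py
INITIAL_SLEEP = 1.0  # hypothetical initial sleep_time, in seconds

def fetch_page_with_backoff(session: requests.Session, url: str, params: dict) -> dict:
    """GET one page, retrying on HTTP errors with exponential backoff."""
    sleep_time = INITIAL_SLEEP
    retries = 0
    while True:
        try:
            res = session.get(url, params=params, timeout=(10, 60))
            res.raise_for_status()
            return res.json()
        except requests.exceptions.HTTPError:
            retries += 1
            if retries >= MAX_RETRIES:
                raise  # out of attempts; surface the error to the caller
            sleep_time *= 2  # back off: 2s, 4s, 8s, ...
            time.sleep(sleep_time)
```

The surrounding `while` loop in the diff then keeps requesting pages until the NVD API reports a `resultsPerPage` of 0 or `startIndex` catches up with `totalResults`.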