Github scraper initial #38
base: main
@@ -0,0 +1,129 @@
import os
import re
import pandas as pd
import requests
from pyarrow import parquet as pq
from tqdm import tqdm
from logger import setup_logger

logger = setup_logger()

# Constants
GITHUB_API_URL = "https://api.github.com/repos/"
AUTH_HEADER = {"Authorization": ""}
# AUTH_HEADER = {"Authorization": os.getenv("GITHUB_TOKEN", "")}  # Use an environment variable for the GitHub token

# Fields to extract from the GitHub API response
FIELDS_TO_EXTRACT = {
    "created_at": "created_at",
    "updated_at": "updated_at",
    "pushed_at": "pushed_at",
    "stargazers_count": "stargazers_count",
    "forks_count": "forks_count",
    "open_issues_count": "open_issues_count",
    "subscribers_count": "subscribers_count",
    "watchers_count": "watchers_count",
    "releases_url": "releases_url",
    "commits_url": "commits_url",
    "collaborators_url": "collaborators_url",
    "contributors_url": "contributors_url",
"license.name": "license", | ||
} | ||
|
||
|
||
def fetch_github_data(repo_url):
    """
    Fetches data from the GitHub API for a given repository URL and extracts specified fields.

    Args:
        repo_url (str): The GitHub repository URL.

    Returns:
        dict: A dictionary containing the extracted data fields.
    """
    repo_name = "/".join(repo_url.split("/")[-2:])
    response = requests.get(GITHUB_API_URL + repo_name, headers=AUTH_HEADER)

    if response.status_code == 200:
        data = response.json()
        extracted_data = {}
        for key, field in FIELDS_TO_EXTRACT.items():
            if "." in key:
                top_level_key, nested_key = key.split(".")
                top_level_data = data.get(top_level_key, {})
                if isinstance(top_level_data, dict):
                    extracted_data[field] = top_level_data.get(nested_key, None)
                else:
                    extracted_data[field] = None
            else:
                extracted_data[field] = data.get(key, None)
        return extracted_data
    else:
        logger.error(f"Failed to fetch data for {repo_url}: {response.status_code}")
        return None


def scrape_github_data(config):
    """
    Scrapes GitHub data for packages specified by the configuration.

    Args:
        config (dict): Configuration dictionary containing letters to scrape.
    """
    letters_to_scrape = config["letters"]
    all_data = []

    for letter in letters_to_scrape:
        directory = f"output/json/first_letter={letter}"
        if os.path.exists(directory):
            for file_name in os.listdir(directory):
                if file_name.endswith(".parquet"):
                    file_path = os.path.join(directory, file_name)
                    df = pq.read_table(file_path).to_pandas()
Review comment: is this from pypi? we probably should update the output name to

Review comment: also you should use hive partitioning here:

    pd.read_parquet(
        directory,
        filters=[("first_letter", "==", letters_to_scrape)],
    )

You will have to update how the filters work; "==" will not work with the list of letters.

Review comment: a few more thoughts. The source list of GitHub URLs will not be exclusive to PyPI; what if there is a conda package that is not in PyPI? As a side note, this can also be done in DuckDB like so:

    -- Main query to process the data and return GitHub URLs with first_letter filter in package_data CTE
    WITH pypi_package_data AS (
        SELECT
            first_letter,
            project_urls,
            home_page
        FROM read_parquet('output/json/first_letter=*/**.parquet')
        WHERE first_letter IN ('a', 'b', 'c') -- Replace with your desired letters
    ),
    pypi_github_urls AS (
        SELECT
            COALESCE(
                json_extract(project_urls, '$.Source'),
                json_extract(project_urls, '$.Homepage'),
                home_page
            ) AS source_url
        FROM pypi_package_data
    )
    SELECT DISTINCT source_url
    FROM pypi_github_urls
    WHERE source_url LIKE '%github.com%'
    ORDER BY source_url;
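For reference, a minimal sketch of the filter change the comment above describes, assuming the PyPI output is laid out as a hive-partitioned dataset under output/json/ (first_letter=a/, first_letter=b/, ...) and a hypothetical letters list; pandas passes filters through to pyarrow, and the "in" operator accepts a list where "==" does not:

    import pandas as pd

    letters_to_scrape = ["a", "b", "c"]  # hypothetical config value

    # Read the whole partitioned dataset once, keeping only the requested letters
    df = pd.read_parquet(
        "output/json",
        filters=[("first_letter", "in", letters_to_scrape)],
    )

This would replace the per-letter directory loop above with a single filtered read.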

                    # Reconstruct project_urls from flattened columns
                    df["project_urls"] = df.filter(like="project_urls.").apply(
                        lambda row: {
                            col.split(".")[-1]: row[col]
                            for col in row.index
                            if pd.notna(row[col])
                        },
                        axis=1,
                    )

                    for _, row in tqdm(
                        df.iterrows(), total=len(df), desc=f"Processing letter {letter}"
                    ):
                        package_name = row.get("name")

                        # Get the GitHub URL from project_urls or home_page
                        source_url = row.get("project_urls", {}).get("Some_identifier")
                        if not source_url or "github.com" not in source_url:
                            source_url = row.get("home_page")

                        # Ensure the URL is in the correct format
                        if source_url and "github.com" in source_url:
                            repo_match = re.match(
                                r"https?://github\.com/[^/]+/[^/]+", source_url
                            )
                            if repo_match:
                                data = fetch_github_data(repo_match.group())
                                if data:
                                    data["first_letter"] = letter
                                    data["package_name"] = package_name  # Add the package name
                                    all_data.append(data)

    # Save the scraped data to a parquet file
    if all_data:
        output_df = pd.DataFrame(all_data)
        output_dir = "output/github"
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        output_file = os.path.join(output_dir, "github_data.parquet")
        output_df.to_parquet(output_file, partition_cols=["first_letter"])
        logger.info(
            "Scraping completed and data saved to output/github/github_data.parquet"
        )
    else:
        logger.info("No valid GitHub URLs found or failed to fetch data.")
@@ -87,6 +87,7 @@ def process_packages_by_letter(letter, package_names, output_dir):
    all_package_data = []
    for package_name in tqdm(letter_package_names, desc=f"Processing letter {letter}"):
        package_data = get_package_data(package_name)
        df = pd.json_normalize(package_data)
Review comment: what is this doing?
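For context on the question above: pd.json_normalize flattens nested dicts into dot-separated columns, which is presumably where the "project_urls."-prefixed columns that the GitHub scraper later reconstructs come from. A small illustration with made-up values (the diff hunk does not show how df is used afterwards):

    import pandas as pd

    # Hypothetical nested package metadata, similar in shape to a PyPI response
    package_data = {
        "name": "example-package",
        "project_urls": {"Source": "https://github.com/example/example-package"},
    }

    df = pd.json_normalize(package_data)
    print(df.columns.tolist())
    # ['name', 'project_urls.Source']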
        if package_data:
            all_package_data.append(package_data)
Review comment: why store this url? can we fetch a list of collaborators instead?
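A minimal sketch of what fetching the list directly could look like, assuming the public contributors endpoint (the collaborators endpoint requires push access to the repository); the helper name is made up and pagination is omitted:

    import requests

    def fetch_contributors(repo_name, headers=None):
        """Return contributor logins for an "owner/repo" string, or None on failure."""
        url = f"https://api.github.com/repos/{repo_name}/contributors"
        response = requests.get(url, headers=headers or {})
        if response.status_code == 200:
            # Only the first page of contributors; add pagination for large repos
            return [contributor["login"] for contributor in response.json()]
        return None

    # Usage (with the module's AUTH_HEADER): fetch_contributors("owner/repo", headers=AUTH_HEADER)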