Skip to content

Commit

Permalink
Align os.path usage (#8435)
Browse files — browse the repository at this point in the history
  • Loading branch information
rusty1s authored Nov 24, 2023
1 parent d1f6708 commit 5954bb9
Show file tree
Hide file tree
Showing 12 changed files with 66 additions and 68 deletions.
6 changes: 3 additions & 3 deletions graphgym/configs_gen.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import argparse
import copy
import csv
import os
import os.path as osp
import random

import numpy as np
Expand Down Expand Up @@ -126,7 +126,7 @@ def gen_grid(args, config, config_budget={}):
fname_start = get_fname(args.config)
out_dir = f'{args.out_dir}/{task_name}'
makedirs_rm_exist(out_dir)
config['out_dir'] = os.path.join(config['out_dir'], task_name)
config['out_dir'] = osp.join(config['out_dir'], task_name)

outs = load_search_file(args.grid)
for i, out in enumerate(outs):
Expand Down Expand Up @@ -166,7 +166,7 @@ def gen_grid_sample(args, config, config_budget={}, compare_alias_list=[]):
fname_start = get_fname(args.config)
out_dir = f'{args.out_dir}/{task_name}'
makedirs_rm_exist(out_dir)
config['out_dir'] = os.path.join(config['out_dir'], task_name)
config['out_dir'] = osp.join(config['out_dir'], task_name)
outs = load_search_file(args.grid)

counts = []
Expand Down
7 changes: 4 additions & 3 deletions test/profile/test_profile.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os.path
import os
import os.path as osp
import warnings

import pytest
Expand Down Expand Up @@ -105,7 +106,7 @@ def test_torch_profile(capfd, get_dataset, device):
assert 'Self CUDA time total' in out

rename_profile_file('test_profile')
assert os.path.exists('profile-test_profile.json')
assert osp.exists('profile-test_profile.json')
os.remove('profile-test_profile.json')


Expand All @@ -128,7 +129,7 @@ def test_xpu_profile(capfd, get_dataset, export_chrome_trace):
assert 'Self XPU' in out

f_name = 'timeline.json'
f_exists = os.path.exists(f_name)
f_exists = osp.exists(f_name)
if not export_chrome_trace:
assert not f_exists
else:
Expand Down
4 changes: 2 additions & 2 deletions torch_geometric/datasets/dgraph.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import os
import os.path as osp
from typing import Callable, Optional

import numpy as np
Expand Down Expand Up @@ -75,7 +75,7 @@ def num_classes(self) -> int:

def process(self):
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
path = os.path.join(self.raw_dir, "dgraphfin.npz")
path = osp.join(self.raw_dir, "dgraphfin.npz")

with np.load(path) as loader:
x = torch.from_numpy(loader['x']).to(torch.float)
Expand Down
4 changes: 2 additions & 2 deletions torch_geometric/datasets/neurograph.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,15 +85,15 @@ def __init__(

@property
def raw_dir(self) -> str:
return os.path.join(self.root, self.name, 'raw')
return osp.join(self.root, self.name, 'raw')

@property
def raw_file_names(self) -> str:
return 'data.pt'

@property
def processed_dir(self) -> str:
return os.path.join(self.root, self.name, 'processed')
return osp.join(self.root, self.name, 'processed')

@property
def processed_file_names(self) -> str:
Expand Down
5 changes: 2 additions & 3 deletions torch_geometric/datasets/rel_link_pred_dataset.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import os
import os.path as osp
from typing import Callable, List, Optional

Expand Down Expand Up @@ -60,11 +59,11 @@ def num_relations(self) -> int:

@property
def raw_dir(self) -> str:
return os.path.join(self.root, self.name, 'raw')
return osp.join(self.root, self.name, 'raw')

@property
def processed_dir(self) -> str:
return os.path.join(self.root, self.name, 'processed')
return osp.join(self.root, self.name, 'processed')

@property
def processed_file_names(self) -> str:
Expand Down
4 changes: 2 additions & 2 deletions torch_geometric/datasets/sbm_dataset.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import os
import os.path as osp
from typing import Callable, List, Optional, Union

import numpy as np
Expand Down Expand Up @@ -72,7 +72,7 @@ def __init__(

@property
def processed_dir(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'processed')
return osp.join(self.root, self.__class__.__name__, 'processed')

@property
def processed_file_names(self) -> str:
Expand Down
18 changes: 9 additions & 9 deletions torch_geometric/distributed/partition.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,31 +288,31 @@ def load_partition_info(
) -> Tuple[Dict, int, int, torch.Tensor, torch.Tensor]:

# load the partition with PyG format (graphstore/featurestore)
with open(os.path.join(root_dir, 'META.json'), 'rb') as infile:
with open(osp.join(root_dir, 'META.json'), 'rb') as infile:
meta = json.load(infile)
num_partitions = meta['num_parts']
assert partition_idx >= 0
assert partition_idx < num_partitions
partition_dir = os.path.join(root_dir, f'part_{partition_idx}')
assert os.path.exists(partition_dir)
partition_dir = osp.join(root_dir, f'part_{partition_idx}')
assert osp.exists(partition_dir)

if meta['is_hetero'] is False:
node_pb = torch.load(os.path.join(root_dir, 'node_map.pt'))
edge_pb = torch.load(os.path.join(root_dir, 'edge_map.pt'))
node_pb = torch.load(osp.join(root_dir, 'node_map.pt'))
edge_pb = torch.load(osp.join(root_dir, 'edge_map.pt'))

return (meta, num_partitions, partition_idx, node_pb, edge_pb)
else:
node_pb_dict = {}
node_pb_dir = os.path.join(root_dir, 'node_map')
node_pb_dir = osp.join(root_dir, 'node_map')
for ntype in meta['node_types']:
node_pb_dict[ntype] = torch.load(
os.path.join(node_pb_dir, f'{as_str(ntype)}.pt'))
osp.join(node_pb_dir, f'{as_str(ntype)}.pt'))

edge_pb_dict = {}
edge_pb_dir = os.path.join(root_dir, 'edge_map')
edge_pb_dir = osp.join(root_dir, 'edge_map')
for etype in meta['edge_types']:
edge_pb_dict[tuple(etype)] = torch.load(
os.path.join(edge_pb_dir, f'{as_str(etype)}.pt'))
osp.join(edge_pb_dir, f'{as_str(etype)}.pt'))

return (meta, num_partitions, partition_idx, node_pb_dict,
edge_pb_dict)
57 changes: 29 additions & 28 deletions torch_geometric/graphgym/utils/agg_runs.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import logging
import os
import os.path as osp

import numpy as np

Expand Down Expand Up @@ -91,12 +92,12 @@ def agg_runs(dir, metric_best='auto'):
results_best = {'train': None, 'val': None}
for seed in os.listdir(dir):
if is_seed(seed):
dir_seed = os.path.join(dir, seed)
dir_seed = osp.join(dir, seed)

split = 'val'
if split in os.listdir(dir_seed):
dir_split = os.path.join(dir_seed, split)
fname_stats = os.path.join(dir_split, 'stats.json')
dir_split = osp.join(dir_seed, split)
fname_stats = osp.join(dir_split, 'stats.json')
stats_list = json_to_dict_list(fname_stats)
if metric_best == 'auto':
metric = 'auc' if 'auc' in stats_list[0] else 'accuracy'
Expand All @@ -112,8 +113,8 @@ def agg_runs(dir, metric_best='auto'):

for split in os.listdir(dir_seed):
if is_split(split):
dir_split = os.path.join(dir_seed, split)
fname_stats = os.path.join(dir_split, 'stats.json')
dir_split = osp.join(dir_seed, split)
fname_stats = osp.join(dir_split, 'stats.json')
stats_list = json_to_dict_list(fname_stats)
stats_best = [
stats for stats in stats_list
Expand All @@ -138,9 +139,9 @@ def agg_runs(dir, metric_best='auto'):
results_best[key] = agg_dict_list(results_best[key])
# save aggregated results
for key, value in results.items():
dir_out = os.path.join(dir, 'agg', key)
dir_out = osp.join(dir, 'agg', key)
makedirs_rm_exist(dir_out)
fname = os.path.join(dir_out, 'stats.json')
fname = osp.join(dir_out, 'stats.json')
dict_list_to_json(value, fname)

if cfg.tensorboard_agg:
Expand All @@ -151,11 +152,11 @@ def agg_runs(dir, metric_best='auto'):
dict_list_to_tb(value, writer)
writer.close()
for key, value in results_best.items():
dir_out = os.path.join(dir, 'agg', key)
fname = os.path.join(dir_out, 'best.json')
dir_out = osp.join(dir, 'agg', key)
fname = osp.join(dir_out, 'best.json')
dict_to_json(value, fname)
logging.info('Results aggregated across runs saved in {}'.format(
os.path.join(dir, 'agg')))
osp.join(dir, 'agg')))


def agg_batch(dir, metric_best='auto'):
Expand All @@ -172,58 +173,58 @@ def agg_batch(dir, metric_best='auto'):
for run in os.listdir(dir):
if run != 'agg':
dict_name = name_to_dict(run)
dir_run = os.path.join(dir, run, 'agg')
if os.path.isdir(dir_run):
dir_run = osp.join(dir, run, 'agg')
if osp.isdir(dir_run):
for split in os.listdir(dir_run):
dir_split = os.path.join(dir_run, split)
fname_stats = os.path.join(dir_split, 'best.json')
dir_split = osp.join(dir_run, split)
fname_stats = osp.join(dir_split, 'best.json')
dict_stats = json_to_dict_list(fname_stats)[
-1] # get best val epoch
rm_keys(dict_stats,
['lr', 'lr_std', 'eta', 'eta_std', 'params_std'])
results[split].append({**dict_name, **dict_stats})
dir_out = os.path.join(dir, 'agg')
dir_out = osp.join(dir, 'agg')
makedirs_rm_exist(dir_out)
for key in results:
if len(results[key]) > 0:
results[key] = pd.DataFrame(results[key])
results[key] = results[key].sort_values(
list(dict_name.keys()), ascending=[True] * len(dict_name))
fname = os.path.join(dir_out, '{}_best.csv'.format(key))
fname = osp.join(dir_out, '{}_best.csv'.format(key))
results[key].to_csv(fname, index=False)

results = {'train': [], 'val': [], 'test': []}
for run in os.listdir(dir):
if run != 'agg':
dict_name = name_to_dict(run)
dir_run = os.path.join(dir, run, 'agg')
if os.path.isdir(dir_run):
dir_run = osp.join(dir, run, 'agg')
if osp.isdir(dir_run):
for split in os.listdir(dir_run):
dir_split = os.path.join(dir_run, split)
fname_stats = os.path.join(dir_split, 'stats.json')
dir_split = osp.join(dir_run, split)
fname_stats = osp.join(dir_split, 'stats.json')
dict_stats = json_to_dict_list(fname_stats)[
-1] # get last epoch
rm_keys(dict_stats,
['lr', 'lr_std', 'eta', 'eta_std', 'params_std'])
results[split].append({**dict_name, **dict_stats})
dir_out = os.path.join(dir, 'agg')
dir_out = osp.join(dir, 'agg')
for key in results:
if len(results[key]) > 0:
results[key] = pd.DataFrame(results[key])
results[key] = results[key].sort_values(
list(dict_name.keys()), ascending=[True] * len(dict_name))
fname = os.path.join(dir_out, '{}.csv'.format(key))
fname = osp.join(dir_out, '{}.csv'.format(key))
results[key].to_csv(fname, index=False)

results = {'train': [], 'val': [], 'test': []}
for run in os.listdir(dir):
if run != 'agg':
dict_name = name_to_dict(run)
dir_run = os.path.join(dir, run, 'agg')
if os.path.isdir(dir_run):
dir_run = osp.join(dir, run, 'agg')
if osp.isdir(dir_run):
for split in os.listdir(dir_run):
dir_split = os.path.join(dir_run, split)
fname_stats = os.path.join(dir_split, 'stats.json')
dir_split = osp.join(dir_run, split)
fname_stats = osp.join(dir_split, 'stats.json')
dict_stats = json_to_dict_list(
fname_stats) # get best epoch
if metric_best == 'auto':
Expand All @@ -238,13 +239,13 @@ def agg_batch(dir, metric_best='auto'):
rm_keys(dict_stats,
['lr', 'lr_std', 'eta', 'eta_std', 'params_std'])
results[split].append({**dict_name, **dict_stats})
dir_out = os.path.join(dir, 'agg')
dir_out = osp.join(dir, 'agg')
for key in results:
if len(results[key]) > 0:
results[key] = pd.DataFrame(results[key])
results[key] = results[key].sort_values(
list(dict_name.keys()), ascending=[True] * len(dict_name))
fname = os.path.join(dir_out, '{}_bestepoch.csv'.format(key))
fname = osp.join(dir_out, '{}_bestepoch.csv'.format(key))
results[key].to_csv(fname, index=False)

print('Results aggregated across models saved in {}'.format(dir_out))
15 changes: 7 additions & 8 deletions torch_geometric/loader/mixin.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import glob
import logging
import os
import os.path as osp
from contextlib import contextmanager
from typing import Any, Dict, List, Optional

Expand Down Expand Up @@ -29,21 +29,20 @@ def get_numa_nodes_cores() -> Dict[str, Any]:
nodes = {}
try:
for node_path in numa_node_paths:
numa_node_id = int(os.path.basename(node_path)[4:])
numa_node_id = int(osp.basename(node_path)[4:])

thread_siblings = {}
for cpu_dir in glob.glob(os.path.join(node_path, 'cpu[0-9]*')):
cpu_id = int(os.path.basename(cpu_dir)[3:])
for cpu_dir in glob.glob(osp.join(node_path, 'cpu[0-9]*')):
cpu_id = int(osp.basename(cpu_dir)[3:])
if cpu_id > 0:
with open(os.path.join(cpu_dir,
'online')) as core_online_file:
with open(osp.join(cpu_dir, 'online')) as core_online_file:
core_online = int(
core_online_file.read().splitlines()[0])
else:
core_online = 1 # cpu0 is always online (special case)
if core_online == 1:
with open(os.path.join(cpu_dir, 'topology',
'core_id')) as core_id_file:
with open(osp.join(cpu_dir, 'topology',
'core_id')) as core_id_file:
core_id = int(core_id_file.read().strip())
if core_id in thread_siblings:
thread_siblings[core_id].append(cpu_id)
Expand Down
3 changes: 1 addition & 2 deletions torch_geometric/nn/conv/message_passing.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import inspect
import os
import os.path as osp
import random
import re
Expand Down Expand Up @@ -863,7 +862,7 @@ def jittable(self, typing: Optional[str] = None) -> 'MessagePassing':
forward_types = []
forward_body = 8 * ' ' + f'# type: {typing}\n{forward_body}'

root = os.path.dirname(osp.realpath(__file__))
root = osp.dirname(osp.realpath(__file__))
with open(osp.join(root, 'message_passing.jinja'), 'r') as f:
template = Template(f.read())

Expand Down
8 changes: 4 additions & 4 deletions torch_geometric/nn/model_hub.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import os
import os.path as osp
from pathlib import Path
from typing import Any, Dict, Optional, Union

Expand Down Expand Up @@ -96,7 +96,7 @@ def construct_model_card(self, model_name: str, dataset_name: str) -> Any:
return card

def _save_pretrained(self, save_directory: Union[Path, str]):
path = os.path.join(save_directory, MODEL_WEIGHTS_NAME)
path = osp.join(save_directory, MODEL_WEIGHTS_NAME)
model_to_save = self.module if hasattr(self, 'module') else self
torch.save(model_to_save.state_dict(), path)

Expand Down Expand Up @@ -154,8 +154,8 @@ def _from_pretrained(
):
map_location = torch.device(map_location)

if os.path.isdir(model_id):
model_file = os.path.join(model_id, MODEL_WEIGHTS_NAME)
if osp.isdir(model_id):
model_file = osp.join(model_id, MODEL_WEIGHTS_NAME)
else:
model_file = hf_hub_download(
repo_id=model_id,
Expand Down
Loading

0 comments on commit 5954bb9

Please sign in to comment.