generate_trajectories.py
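
"""Generate offline trajectories for the PublicPST problem in EV2Gym.

Rolls out simple heuristic charging policies (alternating between
ChargeAsFastAsPossible and RoundRobin), records per-step observations,
actions, rewards, and done flags, and pickles the resulting list of
trajectories for later use as an offline dataset.
"""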
import os
import time
import numpy as np
import pickle
import yaml
from tqdm import tqdm
from ev2gym.models import ev2gym_env
from ev2gym.utilities.arg_parser import arg_parser
from ev2gym.rl_agent.reward import SquaredTrackingErrorReward, ProfitMax_TrPenalty_UserIncentives, profit_maximization
from ev2gym.rl_agent.state import V2G_profit_max, PublicPST, V2G_profit_max_loads
from ev2gym.baselines.heuristics import RoundRobin, ChargeAsLateAsPossible, ChargeAsFastAsPossible

if __name__ == "__main__":
    args = arg_parser()

    # Directory where trained models are saved and loaded.
    checkpoint_dir = args.save_dir + args.env

    # Power-setpoint tracking (PST) setup: squared tracking-error reward
    # paired with the PublicPST state encoding.
    reward_function = SquaredTrackingErrorReward
    state_function = PublicPST
    problem = "PublicPST"

    env = ev2gym_env.EV2Gym(config_file=args.config_file,
                            generate_rnd_game=True,
                            state_function=state_function,
                            reward_function=reward_function)

    n_trajectories = args.n_trajectories

    # Read the simulation parameters that identify this dataset.
    with open(args.config_file, 'r') as cfg:
        config = yaml.load(cfg, Loader=yaml.FullLoader)
    number_of_charging_stations = config["number_of_charging_stations"]
    n_transformers = config["number_of_transformers"]
    steps = config["simulation_length"]
    timescale = config["timescale"]
    trajectories = []

    # if args.dataset not in ["ddpg", "random", "optimal"]:
    #     raise ValueError("Dataset not supported")
    trajectories_type = "mixed-RR-Asap"  # args.dataset

    file_name = (f"{problem}_{trajectories_type}_{number_of_charging_stations}_cs_"
                 f"{n_transformers}_tr_{steps}_steps_{timescale}_timescale_"
                 f"{n_trajectories}_trajectories2.pkl")

    save_folder_path = "./trajectories/"
    if not os.path.exists(save_folder_path):
        os.makedirs(save_folder_path)
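
    # Collect n_trajectories episodes; each trajectory is stored as a dict
    # of parallel per-step lists that is converted to numpy arrays at the end.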
    for i in tqdm(range(n_trajectories)):
        trajectory_i = {"observations": [],
                        "actions": [],
                        "rewards": [],
                        "dones": []}

        # print(f'Trajectory: {i}')
        state, _ = env.reset()

        # Alternate between the two heuristics so the dataset mixes
        # charge-as-fast-as-possible and round-robin behaviour
        # (the "mixed-RR-Asap" tag in the file name).
        if i % 2 == 0:
            agent = ChargeAsFastAsPossible(env)
        else:
            agent = RoundRobin(env)
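
        # Roll out one full episode. env.step() returns the Gymnasium-style
        # five-value tuple (observation, reward, terminated, truncated, info).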
        test_reward = 0
        while True:
            actions = agent.get_action(env)
            new_state, reward, done, truncated, _ = env.step(actions)
            test_reward += reward

            trajectory_i["observations"].append(state)
            trajectory_i["actions"].append(actions)
            trajectory_i["rewards"].append(reward)
            trajectory_i["dones"].append(done)

            state = new_state
            if done:
                break
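
        # Stack the per-step records into arrays of shape (T, ...) so the
        # episode can be consumed directly by array-based training code.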
trajectory_i["observations"] = np.array(trajectory_i["observations"])
trajectory_i["actions"] = np.array(trajectory_i["actions"])
trajectory_i["rewards"] = np.array(trajectory_i["rewards"])
trajectory_i["dones"] = np.array(trajectory_i["dones"])
trajectories.append(trajectory_i)
if i % 100000 == 0:
print(f'Saving trajectories to {save_folder_path+file_name}')
f = open(save_folder_path+file_name, 'wb')
# source, destination
pickle.dump(trajectories, f)
env.close()
    print(trajectories[:1])

    # Final save of the complete dataset.
    print(f'Saving trajectories to {save_folder_path + file_name}')
    with open(save_folder_path + file_name, 'wb') as f:
        pickle.dump(trajectories, f)
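
# A minimal sketch of loading the dataset back (hypothetical usage, not part
# of this script): the pickle holds a list of per-episode dicts with numpy
# arrays under "observations", "actions", "rewards", and "dones".
#
#   import pickle
#   with open("./trajectories/" + file_name, "rb") as f:
#       data = pickle.load(f)
#   print(len(data), data[0]["observations"].shape, data[0]["rewards"].shape)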