forked from Charles-Auguste/MOPSI_highway
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_lab.py
87 lines (66 loc) · 2.43 KB
/
test_lab.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
"""
MOPSI Project
RL and autonomous vehicles
This lab is made for quick simulations, with no trained agents.
Its main interest is that the simulation is rendered as seen by the only
(IDM) ego-vehicle, so that we truly understand what the observations
provided to the ego-vehicle are.
Authors : Even Matencio - Charles.A Gourio
Date : 15/02/2021
"""
# Standard library
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
import pygame
import gym
from tqdm import tqdm
# Local source
import highway_env
# 3rd party packages
from datetime import datetime
import imageio
#=====================================================================================
#============================== FUNCTIONS ============================================
#=====================================================================================
def show_var_infos(vari, title="show_var_info", dirpath=None):
    """Plot the evolution of a variance series over simulation time.

    Parameters
    ----------
    vari : sequence of float
        Variance values, one per simulation iteration.
    title : str, optional
        Base name (without extension) of the saved figure file.
        (Fixed typo in the previous default: "swow_var_info".)
    dirpath : str or None, optional
        Directory in which to save the figure as a PNG. If None,
        the figure is only displayed, not saved.
    """
    fig, ax = plt.subplots()
    ax.plot(vari)
    ax.set_title("variance evolution from t=0 to t=T")
    ax.set_xlabel("Time (nb it)")
    ax.set_ylabel("variance (m/s)")
    if dirpath is not None:
        # os.path.join instead of manual "/" concatenation so the
        # path is built correctly regardless of trailing separators.
        fig.savefig(os.path.join(dirpath, title + ".png"))
    plt.show()
# Build the MOPSI ring-road environment registered by highway_env.
env = gym.make('mopsi-env-v0')
#=====================================================================================
#================== CONFIGURATION AND GLOBAL VARIABLES ===============================
#=====================================================================================
# Simulation settings: a single-lane ring of radius 200 with one controlled
# ego-vehicle among 10 others, rendered on a 1000x1000 window, for 100 steps.
_SIM_SETTINGS = (
    ("number_of_lane", 1),
    ("other_vehicles", 10),
    ("controlled_vehicles", 1),
    ("duration", 100),
    ("circle_radius", 200),
    ("screen_width", 1000),
    ("screen_height", 1000),
    # There is no trained agent in the simulation
    ("config_reset", "sim"),
)
for _key, _value in _SIM_SETTINGS:
    env.config[_key] = _value
env.reset()
#=====================================================================================
#============================ MAIN PROGRAM ===========================================
#=====================================================================================
if __name__ == "__main__":
    # Main loop: step the environment with a neutral action and display the
    # observation grid exactly as the (IDM) ego-vehicle perceives it.
    plt.ion()  # interactive mode so the figure refreshes on every step
    fig, ax_lst = plt.subplots()
    for i in range(env.config["duration"]):
        # Action [0, 0]: neutral command; the IDM ego-vehicle drives itself.
        obs, reward, done, info = env.step([0, 0])
        # Observation layers (assumed: obs[0] = vehicle presence,
        # obs[3] = road layout -- TODO confirm against the env's obs config).
        road = np.array(obs[3])
        presence = np.array(obs[0])
        ax_lst.imshow(presence + road)
        fig.canvas.draw()
        fig.canvas.flush_events()
        # Fix: stop once the episode terminates instead of calling
        # env.step() on a finished environment.
        if done:
            break