Skip to content

Commit

Permalink
Merge pull request #60 from aws-deepracer-community/log-new-field
Browse files Browse the repository at this point in the history
Add a fix for the new obstacle_crash_counter field in the log file
  • Loading branch information
larsll authored Oct 17, 2024
2 parents 03daf30 + dd557ff commit 205024a
Show file tree
Hide file tree
Showing 3 changed files with 668 additions and 672 deletions.
14 changes: 7 additions & 7 deletions deepracer/logs/log.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ class DeepRacerLog:
"episode_status",
"pause_duration"
]
# TODO Column names as a workaround for an excess comma in the CSV file
_COL_NAMES_WORKAROUND = [
# Additional obstacle_crash_counter column is added to the CSV file.
_COL_NAMES_NEW = [
"episode",
"steps",
"x",
Expand All @@ -49,7 +49,6 @@ class DeepRacerLog:
"steering_angle",
"speed",
"action",
"action_b",
"reward",
"done",
"all_wheels_on_track",
Expand All @@ -58,7 +57,8 @@ class DeepRacerLog:
"track_len",
"tstamp",
"episode_status",
"pause_duration"
"pause_duration",
"obstacle_crash_counter"
]
_HYPERPARAM_KEYS = [
"batch_size",
Expand Down Expand Up @@ -120,10 +120,10 @@ def __init__(self, model_folder: str = None, filehandler: FileHandler = None,
def _read_csv(self, path: str, splitRegex, type: LogType = LogType.TRAINING):
try:
csv_bytes = self.fh.get_file(path)
# TODO: this is a workaround and should be removed when logs are fixed
# Work also with a new column
df = pd.read_csv(BytesIO(csv_bytes), encoding='utf8',
names=self._COL_NAMES_WORKAROUND, header=0)
df = df.drop("action_b", axis=1)
names=self._COL_NAMES_NEW, header=0)
df = df.drop("obstacle_crash_counter", axis=1)
except pd.errors.ParserError:
try:
df = pd.read_csv(BytesIO(csv_bytes), names=self._COL_NAMES, header=0)
Expand Down
24 changes: 10 additions & 14 deletions deepracer/logs/log_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,10 +143,6 @@ def convert_to_pandas(data, episodes_per_iteration=20, stream=None):

for d in data[:]:
parts = d.rstrip().split(",")
# TODO: this is a workaround and should be removed when logs are fixed
parts_workaround = 0
if len(parts) > 17:
parts_workaround = 1
episode = int(parts[0])
steps = int(parts[1])
x = float(parts[2])
Expand All @@ -158,16 +154,16 @@ def convert_to_pandas(data, episodes_per_iteration=20, stream=None):
action = int(parts[7])
except ValueError as e:
action = -1
reward = float(parts[8+parts_workaround])
done = 0 if 'False' in parts[9+parts_workaround] else 1
all_wheels_on_track = parts[10+parts_workaround]
progress = float(parts[11+parts_workaround])
closest_waypoint = int(parts[12+parts_workaround])
track_len = float(parts[13+parts_workaround])
tstamp = Decimal(parts[14+parts_workaround])
episode_status = parts[15+parts_workaround]
if len(parts) > 16+parts_workaround:
pause_duration = float(parts[16+parts_workaround])
reward = float(parts[8])
done = 0 if 'False' in parts[9] else 1
all_wheels_on_track = parts[10]
progress = float(parts[11])
closest_waypoint = int(parts[12])
track_len = float(parts[13])
tstamp = Decimal(parts[14])
episode_status = parts[15]
if len(parts) > 16:
pause_duration = float(parts[16])
else:
pause_duration = 0.0

Expand Down
Loading

0 comments on commit 205024a

Please sign in to comment.