From 346aa9f6b7318bd0cfcb5b10a34e7b599d93d44a Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Sat, 18 Dec 2021 16:25:04 +0530 Subject: [PATCH 01/14] added soda-parser --- .DS_Store | Bin 0 -> 6148 bytes parsers/.DS_Store | Bin 0 -> 6148 bytes parsers/cifar/.DS_Store | Bin 0 -> 6148 bytes parsers/soda/__init__.py | 0 parsers/soda/parser.py | 40 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 40 insertions(+) create mode 100644 .DS_Store create mode 100644 parsers/.DS_Store create mode 100644 parsers/cifar/.DS_Store create mode 100644 parsers/soda/__init__.py create mode 100644 parsers/soda/parser.py diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..978416809fa192c4878ac943acb512da47175fe1 GIT binary patch literal 6148 zcmeHK%}T>S5Z-O8-BN@e6nb3nS}+Cc4_-p8FJMFuDm5`dgE3o@)*ebBXMG``#OHBl zcO%;BNf0SBF#B!hXOsQ5>~t7oygv!MjI|hJ0u-^*fZ-cKKkAZHtfdOb^%@b&SRPFC zQIIS}li@!yfM-`{Bj)4Du}|lx0`R;Bqa@D9j`PMV)!N#6UDQQWTzmIw>W%&JB=7s# zB|e=gl?3J756LN}=#x&;8N zfmsX8v6sLcY0rRomD?7Bn{=>C8|PXq44QPtZS%qHo4M^!xPCjVFLgNM zu0a}!0b<}Q1K9h4w6Ol4{M`TRAR34PVqi5H;I)A_=)>M@ZC%;XA1B!xig~5*$ jnCMaru~>@RpjyB#aRTUCEDVALg#HL<8fYK}{*-}tr}I-Q literal 0 HcmV?d00001 diff --git a/parsers/.DS_Store b/parsers/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..d702c44cfb237b425107c10e1dbb48dc0d978d6d GIT binary patch literal 6148 zcmeHK!Ait15S`Hq1uuK_m|xhN7a=b73-$-JThK#G78dXMDt}-WkJNe=(@1US3s5=-C@_aDfJ6U{;Em7y`lpp z>gZ|u{iCseUT=44*iUva`i9grZJ%2B>f4XI)BD@;ewuJd-M9Yz%USNpGOjuU&VV!E z3^)Tn!2s@Tk@{NEduPBIa0W&O<%FdAk>cp$8y zKn-OpF<8T44>m6vWtW=)E)G49pq0)Zt3*{|Edsi$(rC z#7EA6Gw{zC;Hud+8*Iw%*0b%&T^rCI(L`ik76k%*_6Wd0&XJqqRDTd1^P*u^lq_N| Q;Xr=~6hgdn27ZBoPdC#qfB*mh literal 0 HcmV?d00001 diff --git a/parsers/cifar/.DS_Store b/parsers/cifar/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..d702c44cfb237b425107c10e1dbb48dc0d978d6d GIT binary patch literal 6148 zcmeHK!Ait15S`Hq1uuK_m|xhN7a=b73-$-JThK#G78dXMDt}-WkJNe=(@1US3s5=-C@_aDfJ6U{;Em7y`lpp z>gZ|u{iCseUT=44*iUva`i9grZJ%2B>f4XI)BD@;ewuJd-M9Yz%USNpGOjuU&VV!E z3^)Tn!2s@Tk@{NEduPBIa0W&O<%FdAk>cp$8y zKn-OpF<8T44>m6vWtW=)E)G49pq0)Zt3*{|Edsi$(rC z#7EA6Gw{zC;Hud+8*Iw%*0b%&T^rCI(L`ik76k%*_6Wd0&XJqqRDTd1^P*u^lq_N| Q;Xr=~6hgdn27ZBoPdC#qfB*mh literal 0 HcmV?d00001 diff --git a/parsers/soda/__init__.py b/parsers/soda/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/parsers/soda/parser.py b/parsers/soda/parser.py new file mode 100644 index 0000000..a83f43c --- /dev/null +++ b/parsers/soda/parser.py @@ -0,0 +1,40 @@ +import json +import os + +PATH = "./SSLAD-2D/SSLAD-2d/train_annotations" +def converter(data, PATH): + + for idx, file in enumerate(data['images']): + idx = idx+1 + + #Darwin format + info = {"dataset": "SODA10M"} + info["annotations"] = [] + + info["image"] = { + "width": int(file['width']), + "height": int(file['height']), + "original_filename": file['file_name'], + "filename": file['file_name'], + "url": '', + "thumbnail_url": '', + "path": '/', + "workview_url": '', + } + for seq,i in enumerate(data["annotations"]): + if i["image_id"] == idx: + info["annotations"].append( + {"bounding_box": {"x": int(i['bbox'][0]), + "y": int(i['bbox'][1]), + "w": int(i['bbox'][2]), + "h": int(i['bbox'][3])}, + "name":data['categories'][i['category_id']-1]['name'] + + } + ) + + json_object = json.dumps(info, indent=4) + + # Writing to sample.json + with open(f"{PATH}/{file['file_name'].split('.')[0]}.json", "w") as outfile: + outfile.write(json_object) \ No newline at end 
of file From 84772dd52345cd3fb01a5709a8ca3cfcd121db85 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 18:07:29 +0530 Subject: [PATCH 02/14] SODA10MParser --- parsers/.DS_Store | Bin 6148 -> 6148 bytes parsers/soda/.DS_Store | Bin 0 -> 6148 bytes parsers/soda/parser.py | 159 ++++++++++++++++++++++++++++++++--------- 3 files changed, 125 insertions(+), 34 deletions(-) create mode 100644 parsers/soda/.DS_Store diff --git a/parsers/.DS_Store b/parsers/.DS_Store index d702c44cfb237b425107c10e1dbb48dc0d978d6d..73cef3f24abdaa0339318cb9d810f2b02cff594a 100644 GIT binary patch delta 18 ZcmZoMXfc?Oz{of;aX%yDW<|!wVgNa71>67t delta 21 dcmZoMXfc?OFnJ>5>dD-UqKpii6&W9k0RT~22CM)8 diff --git a/parsers/soda/.DS_Store b/parsers/soda/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..6fadba16e315261bfca558f3f2445012763ff925 GIT binary patch literal 6148 zcmeHKIc~#13?yS02GXcZxnJ-P7KWV<_ybO&6b1wf@X@QvyZkf{4;T>S6e-deLxG%K zinD^I5JeHu)ummEG$OKq8_JJ`so8z=iM?b-fpDC0mJj$|dO1w0A1934fOWvtkNEG{ zchbtY9NW`Z_u2Q|<0F`*0#twsPys4H1^%Fb^y zAovIXoFnaqwa*g3VhLbP90HMnX;6Vd)f_Q2=*X9>tBFHk&_#3j(0sDygrfd*oL{_L zv<5O#0V*(7U>4iP`u_(0%ltnkaYqHHz*8xp^YwPU!YgHOoxGg&+5+Fft>y|h!`dkb m-j0FZj@HE{@ZI`U2j@@K$wp;3YVR^SGOI2aZH literal 0 HcmV?d00001 diff --git a/parsers/soda/parser.py b/parsers/soda/parser.py index a83f43c..80ab6bf 100644 --- a/parsers/soda/parser.py +++ b/parsers/soda/parser.py @@ -1,40 +1,131 @@ +from ..base import Parser +from pathlib import Path +import pickle +import numpy as np +from ..datatypes import Annotation, Image, ImageAnnotationFile, Tag +from typing import Any, List, Tuple +from PIL import Image as PILImage +from dataclasses import asdict import json -import os +from tqdm import tqdm +from glob import glob -PATH = "./SSLAD-2D/SSLAD-2d/train_annotations" -def converter(data, PATH): + + +class SODAParser: + annotations = {} + filename = [] + ALLOWED_PATHS = ["/train", "/test", "/val"] - for idx, file in enumerate(data['images']): - idx = idx+1 + + def __init__( + self, + images_dir: Path, + annotation_dir: Path, + dataset_name: str, + path: str = "/train", + ): + self.images_dir = images_dir + self.annotation_dir = annotation_dir + self.dataset_name = dataset_name + + if path not in self.ALLOWED_PATHS: + raise ValueError(f"path should be one of {self.ALLOWED_PATHS}") + + self.path = path + + + + def parse_annotations(self, + data_type='train', + save_annotation=True)->ImageAnnotationFile: + + files = glob(f'{self.annotation_dir}/*.json') + + if data_type=='train': + json_data = f'{self.annotation_dir}/instance_train.json' + else: + json_data=f'{self.annotation_dir}/instance_val.json' + + f = open(json_data) + data = json.load(f) + + + for idx, file in tqdm(enumerate(data['images']), total=5000, desc='parsing'): + idx = idx+1 + coor = [] + self.filename.append(file['file_name'].split('.')[0]) + for seq, an in enumerate(data["annotations"]): + if len(an) != 0: + if an["image_id"] == idx: + + coor.append(Annotation(name=data['categories'][an['category_id']-1]['name']) + .add_data( + BoundingBox( + x=int(an['bbox'][0]), + y=int(an['bbox'][1]), + w=int(an['bbox'][2]), + h=int(an['bbox'][3])))) + ann = ImageAnnotationFile( + dataset=self.dataset_name, + image=Image( + width=int(file['width']), + height=int(file['height']), + original_filename=file['file_name'], + filename=file['file_name'], + path=self.path), + annotations=coor) + if save_annotation: + self.annotations[file['file_name'].split('.')[0]] = ann + + + def get_annotations(self, idx:int): + ann = 
self.annotations[self.filename[idx]] + return ann - #Darwin format - info = {"dataset": "SODA10M"} - info["annotations"] = [] + def save_to_json(self, path_to_save='', dir_name='annotationFolder'): + path = f'{path_to_save}/{dir_name}' + try: + if not os.path.exists(path): + os.mkdir(path) + for idx in tqdm(range(5000), desc="Creating JSON file"): + ann = self.get_annotations(idx) + filename = self.filename[idx] + json_object = json.dumps(asdict(ann), indent=4) + with open(f"{path}/{filename}.json", "w") as outfile: + outfile.write(json_object) + except: + print('path exist') + - info["image"] = { - "width": int(file['width']), - "height": int(file['height']), - "original_filename": file['file_name'], - "filename": file['file_name'], - "url": '', - "thumbnail_url": '', - "path": '/', - "workview_url": '', - } - for seq,i in enumerate(data["annotations"]): - if i["image_id"] == idx: - info["annotations"].append( - {"bounding_box": {"x": int(i['bbox'][0]), - "y": int(i['bbox'][1]), - "w": int(i['bbox'][2]), - "h": int(i['bbox'][3])}, - "name":data['categories'][i['category_id']-1]['name'] - - } - ) + def upload_to_darwin(self, + api_key:str, + image_dir: Path, + json_dir: Path): + + images = glob(f'{image_dir}/*.jpg') + annotations = glob(f'{json_dir}/*.json') + client = Client.from_api_key(api_key) + dataset_identifier = f"{client.default_team}/{self.dataset_name}" + try: + dataset = client.create_dataset(self.dataset_name) + except darwin.exceptions.NameTaken: + dataset = client.get_remote_dataset(dataset_identifier) + dataset.push(images, path=self.path) + importer.import_annotations( + dataset, + formats.darwin.parse_file, + annotations, + append=True, + ) + + + + + + - json_object = json.dumps(info, indent=4) - - # Writing to sample.json - with open(f"{PATH}/{file['file_name'].split('.')[0]}.json", "w") as outfile: - outfile.write(json_object) \ No newline at end of file + + + + \ No newline at end of file From 4c7d627f4c4b6946f3b350e152eba0d613790749 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 22:24:19 +0530 Subject: [PATCH 03/14] Update parsers/soda/parser.py Co-authored-by: Andrea Azzini --- parsers/soda/parser.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/parsers/soda/parser.py b/parsers/soda/parser.py index 80ab6bf..73b9c5d 100644 --- a/parsers/soda/parser.py +++ b/parsers/soda/parser.py @@ -51,8 +51,7 @@ def parse_annotations(self, data = json.load(f) - for idx, file in tqdm(enumerate(data['images']), total=5000, desc='parsing'): - idx = idx+1 + for idx, file in tqdm(enumerate(data['images'], 1), total=5000, desc='parsing'): coor = [] self.filename.append(file['file_name'].split('.')[0]) for seq, an in enumerate(data["annotations"]): From ba1ea39cdc92fb38a0d30fdaf6ec128d8a91485f Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 22:27:04 +0530 Subject: [PATCH 04/14] added gitignore --- parsers/soda/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 parsers/soda/.gitignore diff --git a/parsers/soda/.gitignore b/parsers/soda/.gitignore new file mode 100644 index 0000000..6a3e68d --- /dev/null +++ b/parsers/soda/.gitignore @@ -0,0 +1 @@ +**/.DS_Store \ No newline at end of file From 5d5945c73ef93e4396a1ff446483e0664552028e Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 22:43:45 +0530 Subject: [PATCH 05/14] added gitignore --- parsers/soda/.gitignore | 141 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 140 insertions(+), 1 deletion(-) diff --git a/parsers/soda/.gitignore 
b/parsers/soda/.gitignore index 6a3e68d..c49791e 100644 --- a/parsers/soda/.gitignore +++ b/parsers/soda/.gitignore @@ -1 +1,140 @@ -**/.DS_Store \ No newline at end of file +# vscode +.vscode/ +images/ +annotations/ +*gz +*zip +cifar-10-batches-py/ +main.py +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + + +.DS_Store \ No newline at end of file From 3c2b81290fbd71ffa93ffbc05e5ff40e4a0ae958 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 22:45:34 +0530 Subject: [PATCH 06/14] added gitignore --- parsers/soda/.gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parsers/soda/.gitignore b/parsers/soda/.gitignore index c49791e..cfe91e0 100644 --- a/parsers/soda/.gitignore +++ b/parsers/soda/.gitignore @@ -137,4 +137,4 @@ dmypy.json .pyre/ -.DS_Store \ No newline at end of file +/*.DS_Store \ No newline at end of file From d5300b5f17cbf56de0db4283352f66aa5f2e4521 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 22:53:54 +0530 Subject: [PATCH 07/14] added gitignore --- parsers/soda/.gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parsers/soda/.gitignore b/parsers/soda/.gitignore index cfe91e0..4548f02 100644 --- a/parsers/soda/.gitignore +++ b/parsers/soda/.gitignore @@ -137,4 +137,4 @@ dmypy.json .pyre/ -/*.DS_Store \ No newline at end of file +**/.DS_Store \ No newline at end of file From 6cbf4fba906ef0027e6dcec6ec57639e03eb09e2 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Mon, 3 Jan 2022 23:07:52 +0530 Subject: [PATCH 08/14] code formatted --- parsers/soda/parser.py | 114 ++++++++++++++++++----------------------- 1 file changed, 50 insertions(+), 64 deletions(-) diff 
--git a/parsers/soda/parser.py b/parsers/soda/parser.py index 73b9c5d..d4e0ee3 100644 --- a/parsers/soda/parser.py +++ b/parsers/soda/parser.py @@ -11,12 +11,10 @@ from glob import glob - class SODAParser: annotations = {} - filename = [] + filename = [] ALLOWED_PATHS = ["/train", "/test", "/val"] - def __init__( self, @@ -33,57 +31,60 @@ def __init__( raise ValueError(f"path should be one of {self.ALLOWED_PATHS}") self.path = path - - - - def parse_annotations(self, - data_type='train', - save_annotation=True)->ImageAnnotationFile: - - files = glob(f'{self.annotation_dir}/*.json') - - if data_type=='train': - json_data = f'{self.annotation_dir}/instance_train.json' + + def parse_annotations( + self, data_type="train", save_annotation=True + ) -> ImageAnnotationFile: + + files = glob(f"{self.annotation_dir}/*.json") + + if data_type == "train": + json_data = f"{self.annotation_dir}/instance_train.json" else: - json_data=f'{self.annotation_dir}/instance_val.json' - + json_data = f"{self.annotation_dir}/instance_val.json" + f = open(json_data) data = json.load(f) - - - for idx, file in tqdm(enumerate(data['images'], 1), total=5000, desc='parsing'): + + for idx, file in tqdm(enumerate(data["images"], 1), total=5000, desc="parsing"): coor = [] - self.filename.append(file['file_name'].split('.')[0]) + self.filename.append(file["file_name"].split(".")[0]) for seq, an in enumerate(data["annotations"]): if len(an) != 0: if an["image_id"] == idx: - - coor.append(Annotation(name=data['categories'][an['category_id']-1]['name']) - .add_data( - BoundingBox( - x=int(an['bbox'][0]), - y=int(an['bbox'][1]), - w=int(an['bbox'][2]), - h=int(an['bbox'][3])))) + + coor.append( + Annotation( + name=data["categories"][an["category_id"] - 1]["name"] + ).add_data( + BoundingBox( + x=int(an["bbox"][0]), + y=int(an["bbox"][1]), + w=int(an["bbox"][2]), + h=int(an["bbox"][3]), + ) + ) + ) ann = ImageAnnotationFile( - dataset=self.dataset_name, - image=Image( - width=int(file['width']), - height=int(file['height']), - original_filename=file['file_name'], - filename=file['file_name'], - path=self.path), - annotations=coor) + dataset=self.dataset_name, + image=Image( + width=int(file["width"]), + height=int(file["height"]), + original_filename=file["file_name"], + filename=file["file_name"], + path=self.path, + ), + annotations=coor, + ) if save_annotation: - self.annotations[file['file_name'].split('.')[0]] = ann - - - def get_annotations(self, idx:int): + self.annotations[file["file_name"].split(".")[0]] = ann + + def get_annotations(self, idx: int): ann = self.annotations[self.filename[idx]] return ann - - def save_to_json(self, path_to_save='', dir_name='annotationFolder'): - path = f'{path_to_save}/{dir_name}' + + def save_to_json(self, path_to_save="", dir_name="annotationFolder"): + path = f"{path_to_save}/{dir_name}" try: if not os.path.exists(path): os.mkdir(path) @@ -94,16 +95,12 @@ def save_to_json(self, path_to_save='', dir_name='annotationFolder'): with open(f"{path}/{filename}.json", "w") as outfile: outfile.write(json_object) except: - print('path exist') - - - def upload_to_darwin(self, - api_key:str, - image_dir: Path, - json_dir: Path): - - images = glob(f'{image_dir}/*.jpg') - annotations = glob(f'{json_dir}/*.json') + print("path exist") + + def upload_to_darwin(self, api_key: str, image_dir: Path, json_dir: Path): + + images = glob(f"{image_dir}/*.jpg") + annotations = glob(f"{json_dir}/*.json") client = Client.from_api_key(api_key) dataset_identifier = f"{client.default_team}/{self.dataset_name}" 
try: @@ -117,14 +114,3 @@ def upload_to_darwin(self, annotations, append=True, ) - - - - - - - - - - - \ No newline at end of file From 9b812ad937a1fa7eef4265190f64b52826e2a800 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Tue, 4 Jan 2022 10:21:55 +0530 Subject: [PATCH 09/14] removed DS_Store --- .DS_Store | Bin 6148 -> 6148 bytes parsers/.DS_Store | Bin 6148 -> 0 bytes parsers/cifar/.DS_Store | Bin 6148 -> 0 bytes parsers/soda/.DS_Store | Bin 6148 -> 0 bytes 4 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 parsers/.DS_Store delete mode 100644 parsers/cifar/.DS_Store delete mode 100644 parsers/soda/.DS_Store diff --git a/.DS_Store b/.DS_Store index 978416809fa192c4878ac943acb512da47175fe1..d69ff3eb2891aea47c2f66ae5a3bd6db5946eac0 100644 GIT binary patch delta 420 zcmZoMXfc=|#>B!ku~2NHo}wrx0|Nsi1A_pAXHI@{QcivnkT0-Vk$E|DJxGd$L60Gw zA(Np5SsJM0Cs0oBKNtX63=CW_bwIW040#Os3`Gp7sCt;v(e&^$I5YS&_%Vbq1TlCr zI5C6*S)M??JJ76Lh7>PU?F^1++SwTj7!rZj6f>lPNHo>!kyTITVVWdjVrinIU~Fkn ztD{hDX=I?IU}9`Gc@tAkE+^1_SD^7O48A}bY&WW5jIzjv0oei|-!T+2lmdMVV&wvH z9zzMxDSAM%0wc7#5&D5ayM=KxI|n}pFb;s6@640=MI1SRQNRRL#IQL+WDPR_y3A^6 delta 111 zcmZoMXfc=|#>B)qu~2NHo}wr-0|Nsi1A_nqLn=cNLvd1haY0hf#=_-{jLeg-uuKv$ zHPcZrG%>H$QK&XFu(Z@surM&4T*aERS%gEFWn#m%&Fmcf96;kX3vzsCp3E=e$N@AE MWFpJv2$40+0M;BD3;+NC diff --git a/parsers/.DS_Store b/parsers/.DS_Store deleted file mode 100644 index 73cef3f24abdaa0339318cb9d810f2b02cff594a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKOKQU~5S?jVFm#h;m%2iB-4xW|o*)-!9YedYQ6O}$Q}qHlKyK4z=^M>7*u->~ zLdpzCZ>0IidSFRIM7(eQIH=@4p@npC9|jal#>WzuLENC%H$WkJNe=(@1US3s5=-C@_aDfJ6U{;Em7y`lpp z>gZ|u{iCseUT=44*iUva`i9grZJ%2B>f4XI)BD@;ewuJd-M9Yz%USNpGOjuU&VV!E z3^)Tn!2s@Tk@{NEduPBIa0W&O<%FdAk>cp$8y zKn-OpF<8T44>m6vWtW=)E)G49pq0)Zt3*{|Edsi$(rC z#7EA6Gw{zC;Hud+8*Iw%*0b%&T^rCI(L`ik76k%*_6Wd0&XJqqRDTd1^P*u^lq_N| Q;Xr=~6hgdn27ZBoPdC#qfB*mh diff --git a/parsers/soda/.DS_Store b/parsers/soda/.DS_Store deleted file mode 100644 index 6fadba16e315261bfca558f3f2445012763ff925..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKIc~#13?yS02GXcZxnJ-P7KWV<_ybO&6b1wf@X@QvyZkf{4;T>S6e-deLxG%K zinD^I5JeHu)ummEG$OKq8_JJ`so8z=iM?b-fpDC0mJj$|dO1w0A1934fOWvtkNEG{ zchbtY9NW`Z_u2Q|<0F`*0#twsPys4H1^%Fb^y zAovIXoFnaqwa*g3VhLbP90HMnX;6Vd)f_Q2=*X9>tBFHk&_#3j(0sDygrfd*oL{_L zv<5O#0V*(7U>4iP`u_(0%ltnkaYqHHz*8xp^YwPU!YgHOoxGg&+5+Fft>y|h!`dkb m-j0FZj@HE{@ZI`U2j@@K$wp;3YVR^SGOI2aZH From fe7480d98ad6dcdc5180aa322c7f4c3ac4668f59 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Tue, 4 Jan 2022 10:56:35 +0530 Subject: [PATCH 10/14] removed DS_Store --- .DS_Store | Bin 6148 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index d69ff3eb2891aea47c2f66ae5a3bd6db5946eac0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKL2uJA6n^dkO*SEVVA3u~k+@c+unMW-Qo2s42S8c}8~~MMsY*l|mnL12s!BP- zkKxKM;lFT#@7b=jri0_eke~G4XZyWpJD(FfCL+TZuC8z zCUKTGn?FQl$GLH{>Q>!7_ub%wTnzFcpJ&}*_J(VxQYPWL9E7jp(PCJ;b0pI|h||$b zCB)GPA#Y#DX(Sh2IZvZZRB@UwOED147PAR$02eUMhDK7zgJy3oON_+GScnRI& zE}||jaE%v~KG7Tt%%ur5r5yA(U_WoYAUwT``KVgCJ`9+*&fU8{Q?@g;3|I!PhymUo zd?;hbV4+cM9Wd$&0Cdo-1ZDN>5A?YK*fCgWL<>adRG>~3=87TA%|YmN_&X-P(5TZ% zsFBf+8CjSciZB-s;j3^G9gWsn1}p=c4D9G?jnDt%Ki~g1lWfg0U>W$Y7!Xcx(Cgxn z%-Op2aD3MK(DzUljw>{(6c}?Hs{$Xz2T&#O`CI^Y3>F&E0 Date: Tue, 4 Jan 2022 11:17:34 +0530 Subject: [PATCH 11/14] added readme --- 
parsers/soda/README.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 parsers/soda/README.md diff --git a/parsers/soda/README.md b/parsers/soda/README.md new file mode 100644 index 0000000..d9d9c3c --- /dev/null +++ b/parsers/soda/README.md @@ -0,0 +1,42 @@ +# SODA10M Dataset + +## Get the Dataset +You can download the labeled dataset from this [link](https://drive.google.com/file/d/1oSJ0rbqNHLmlOOzpmQqXLDraCCQss4Q4/view?usp=sharing). The link contains both training and validation dataset with annotations. + +## Usage + +``` +from parsers.soda.parser import SODAParser +from pathlib import Path +import os + +parser = SODAParser( + images_dir = 'path_to_image_dir/SSLAD-2d/labelled/train', + annotation_dir = 'path_to_image_dir/SSLAD-2d/labelled/annotations', + dataset_name = soda10m, +) + +#parse it +parser_annotation = parser.parse_annotations() + +#convert annotations to JSON +parser.save_to_json( + path_to_save = 'path', + dir_name = "annotationFolder" +) + +#upload to Darwin +parser.upload_to_darwin( + api_key = api_key, + image_dir = 'path_to_image_dir/SSLAD-2d/labelled/train', + json_dir = 'path/annotationFolder' +) + +#get filenames +print(parser.filename) + +#get single annotations +print(parser.get_annotations(100)) + +``` + From 83f83220628df8d0ca334fa0541f21b38c6de7b5 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Tue, 4 Jan 2022 11:18:57 +0530 Subject: [PATCH 12/14] added readme --- parsers/soda/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/parsers/soda/README.md b/parsers/soda/README.md index d9d9c3c..7e2c759 100644 --- a/parsers/soda/README.md +++ b/parsers/soda/README.md @@ -5,7 +5,8 @@ You can download the labeled dataset from this [link](https://drive.google.com/f ## Usage -``` +```python + from parsers.soda.parser import SODAParser from pathlib import Path import os From bb8726bcfedb7bd5404c0c409f187207c6653585 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Tue, 4 Jan 2022 11:19:45 +0530 Subject: [PATCH 13/14] added readme --- parsers/soda/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parsers/soda/README.md b/parsers/soda/README.md index 7e2c759..dc80f6e 100644 --- a/parsers/soda/README.md +++ b/parsers/soda/README.md @@ -14,7 +14,7 @@ import os parser = SODAParser( images_dir = 'path_to_image_dir/SSLAD-2d/labelled/train', annotation_dir = 'path_to_image_dir/SSLAD-2d/labelled/annotations', - dataset_name = soda10m, + dataset_name = 'soda10m', ) #parse it From 77d04f2b57dd0160ad8e32cc67c92c9d8ee52423 Mon Sep 17 00:00:00 2001 From: Nilesh Barla Date: Tue, 4 Jan 2022 11:24:01 +0530 Subject: [PATCH 14/14] added readme --- parsers/soda/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parsers/soda/README.md b/parsers/soda/README.md index dc80f6e..b3b5049 100644 --- a/parsers/soda/README.md +++ b/parsers/soda/README.md @@ -1,7 +1,7 @@ # SODA10M Dataset ## Get the Dataset -You can download the labeled dataset from this [link](https://drive.google.com/file/d/1oSJ0rbqNHLmlOOzpmQqXLDraCCQss4Q4/view?usp=sharing). The link contains both training and validation dataset with annotations. +You can download the labeled dataset from this [link](https://drive.google.com/file/d/1oSJ0rbqNHLmlOOzpmQqXLDraCCQss4Q4/view?usp=sharing). The link contains both the training and the validation datasets along with their annotations. ## Usage
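
---

Note on the types the parser depends on: `parsers/soda/parser.py` imports `Annotation`, `Image`, `ImageAnnotationFile`, and `Tag` from `..datatypes` and builds `BoundingBox` payloads, but none of those definitions appear in this patch series (and `BoundingBox`, `os`, and darwin-py's `Client`, `importer`, and `formats` are referenced in the hunks above without being imported). The sketch below is a minimal, assumed approximation of the datatypes the parser appears to expect — field names and the chaining behaviour of `add_data` are inferred from how they are used in `parse_annotations`, not taken from the actual `parsers/datatypes` module.

```python
# Minimal sketch of the datatypes SODAParser appears to rely on.
# These are assumptions inferred from usage in parser.py, not the real
# parsers/datatypes definitions shipped with the repository.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class BoundingBox:
    x: int
    y: int
    w: int
    h: int


@dataclass
class Image:
    width: int
    height: int
    original_filename: str
    filename: str
    path: str = "/"
    url: str = ""
    thumbnail_url: str = ""
    workview_url: str = ""


@dataclass
class Annotation:
    name: str
    bounding_box: Optional[BoundingBox] = None

    def add_data(self, box: BoundingBox) -> "Annotation":
        # parser.py chains .add_data(...) onto a freshly created Annotation
        # and appends the result, so add_data must return the annotation.
        self.bounding_box = box
        return self


@dataclass
class ImageAnnotationFile:
    dataset: str
    image: Image
    annotations: List[Annotation] = field(default_factory=list)
```

With definitions along these lines, `asdict(ann)` in `save_to_json` yields a nested dictionary whose shape roughly matches the hand-built Darwin JSON from the PATCH 01/14 version of the converter (`dataset`, `image`, and a list of `{"bounding_box": ..., "name": ...}` entries); the imported `Tag` type is never used in the parser as committed.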