diff --git a/config/update_t4_with_fastlabel_sample.yaml b/config/update_t4_with_fastlabel_sample.yaml index 635c2ca4..af0a7c2a 100644 --- a/config/update_t4_with_fastlabel_sample.yaml +++ b/config/update_t4_with_fastlabel_sample.yaml @@ -14,11 +14,9 @@ description: CAM_FRONT_LEFT: 5 conversion: - input_base: ./data/input/t4_format_2d_annotated # could be non_annotated_t4_format or t4_format_3d_annotated + input_base: ./data/input_t4_format # could be non_annotated_t4_format or t4_format_3d_annotated input_anno_base: ./data/fastlabel - input_bag_base: ./data/rosbag2 - output_base: ./data/output/t4_format_2d_annotated # this only includes the 2D annotations - topic_list: ./config/topic_list_sample.yaml + output_base: ./data/output_t4_format # currently, this only includes the 2D annotations dataset_corresponding: # input t4dataset_name: FastLabel json file name - DBv2.0-2-4: 2-4.json + T4DatasetName: FastLabelAnnotationFile diff --git a/perception_dataset/convert.py b/perception_dataset/convert.py index 23c3b969..670fd409 100644 --- a/perception_dataset/convert.py +++ b/perception_dataset/convert.py @@ -380,10 +380,6 @@ def main(): input_anno_base = config_dict["conversion"]["input_anno_base"] dataset_corresponding = config_dict["conversion"]["dataset_corresponding"] description = config_dict["description"] - input_bag_base = config_dict["conversion"]["input_bag_base"] - topic_list_yaml_path = config_dict["conversion"]["topic_list"] - with open(topic_list_yaml_path) as f: - topic_list_yaml = yaml.safe_load(f) converter = FastLabel2dToUpdater( input_base=input_base, @@ -392,8 +388,6 @@ def main(): dataset_corresponding=dataset_corresponding, overwrite_mode=args.overwrite, description=description, - input_bag_base=input_bag_base, - topic_list=topic_list_yaml, ) logger.info( f"[BEGIN] Updating T4 dataset ({input_base}) with FastLabel {input_anno_base} into T4 data ({output_base})" diff --git a/perception_dataset/fastlabel_to_t4/fastlabel_2d_to_t4_updater.py 
b/perception_dataset/fastlabel_to_t4/fastlabel_2d_to_t4_updater.py index 0df1fb85..c92601d8 100644 --- a/perception_dataset/fastlabel_to_t4/fastlabel_2d_to_t4_updater.py +++ b/perception_dataset/fastlabel_to_t4/fastlabel_2d_to_t4_updater.py @@ -1,9 +1,10 @@ from __future__ import annotations +import json import os.path as osp from pathlib import Path import shutil -from typing import Dict, List +from typing import Dict from perception_dataset.fastlabel_to_t4.fastlabel_2d_to_t4_converter import ( FastLabel2dToT4Converter, @@ -23,8 +24,6 @@ def __init__( dataset_corresponding: Dict[str, int], overwrite_mode: bool, description: Dict[str, Dict[str, str]], - input_bag_base: str | None, - topic_list: Dict[str, List[str]] | List[str], ): super().__init__( input_base, @@ -33,13 +32,13 @@ def __init__( dataset_corresponding, overwrite_mode, description, - input_bag_base, - topic_list, + input_bag_base=None, + topic_list=None, ) def convert(self) -> None: anno_jsons_dict = self._load_annotation_jsons() - fl_annotations = self._format_deepen_annotation(anno_jsons_dict) + fl_annotations = self._format_fastlabel_annotation(anno_jsons_dict) for t4dataset_name in self._t4dataset_name_to_merge: # Check if input directory exists @@ -53,9 +52,13 @@ def convert(self) -> None: output_dir = self._output_base / t4dataset_name / "t4_dataset" if self._input_bag_base is not None: input_bag_dir = Path(self._input_bag_base) / t4dataset_name + if osp.exists(output_dir): logger.error(f"{output_dir} already exists.") is_dir_exist = True + else: + is_dir_exist = False + if self._overwrite_mode or not is_dir_exist: # Remove existing output directory shutil.rmtree(output_dir, ignore_errors=True) @@ -78,3 +81,16 @@ def convert(self) -> None: scene_anno_dict=fl_annotations[t4dataset_name], dataset_name=t4dataset_name, ) + + def _load_annotation_jsons(self): + anno_dict = {} + for file in self._input_anno_files: + t4_dataset_name = None + for name, ann_filename in 
self._t4dataset_name_to_merge.items(): + if ann_filename == file.name: + t4_dataset_name = name + + assert t4_dataset_name is not None + with open(file) as f: + anno_dict[t4_dataset_name] = json.load(f) + return anno_dict diff --git a/perception_dataset/t4_dataset/annotation_files_updater.py b/perception_dataset/t4_dataset/annotation_files_updater.py index 02f82d9f..62d7f4f7 100644 --- a/perception_dataset/t4_dataset/annotation_files_updater.py +++ b/perception_dataset/t4_dataset/annotation_files_updater.py @@ -1,10 +1,30 @@ +import json import os.path as osp -from typing import Any +from typing import Any, Dict from perception_dataset.t4_dataset.annotation_files_generator import AnnotationFilesGenerator +from perception_dataset.t4_dataset.classes import ( + AttributeTable, + CategoryTable, + InstanceTable, + ObjectAnnTable, + SampleAnnotationTable, + SurfaceAnnTable, + VisibilityTable, +) + + +def _load_json(filepath: str) -> Any: + with open(filepath) as f: + data = json.load(f) + return data class AnnotationFilesUpdater(AnnotationFilesGenerator): + def __init__(self, with_camera: bool = True, description: Dict[str, Dict[str, str]] = ...): + super().__init__(with_camera, description) + self.description = description + def convert_one_scene( self, input_dir: str, @@ -17,18 +37,7 @@ def convert_one_scene( raise ValueError(f"Annotations files doesn't exist in {anno_dir}") # Load existence annotation files - self._attribute_table.insert_from_json(osp.join(anno_dir, self._attribute_table.FILENAME)) - self._category_table.insert_from_json(osp.join(anno_dir, self._category_table.FILENAME)) - self._instance_table.insert_from_json(osp.join(anno_dir, self._instance_table.FILENAME)) - self._sample_annotation_table.insert_from_json( - osp.join(anno_dir, self._sample_annotation_table.FILENAME) - ) - self._object_ann_table.insert_from_json( - osp.join(anno_dir, self._object_ann_table.FILENAME) - ) - self._surface_ann_table.insert_from_json( - osp.join(anno_dir, 
self._surface_ann_table.FILENAME) - ) + self._init_table_from_json(anno_dir=anno_dir) super().convert_one_scene( input_dir=input_dir, @@ -36,3 +45,50 @@ def convert_one_scene( scene_anno_dict=scene_anno_dict, dataset_name=dataset_name, ) + + def _init_table_from_json(self, anno_dir: str) -> None: + self._attribute_table = AttributeTable.from_json( + filepath=osp.join(anno_dir, AttributeTable.FILENAME), + name_to_description={}, + default_value="", + ) + + self._category_table = CategoryTable.from_json( + filepath=osp.join(anno_dir, CategoryTable.FILENAME), + name_to_description={}, + default_value="", + ) + + self._instance_table = InstanceTable.from_json( + filepath=osp.join(anno_dir, InstanceTable.FILENAME) + ) + + self._visibility_table = VisibilityTable.from_json( + filepath=osp.join(anno_dir, VisibilityTable.FILENAME), + level_to_description=self.description.get( + "visibility", + { + "v0-40": "visibility of whole object is between 0 and 40%", + "v40-60": "visibility of whole object is between 40 and 60%", + "v60-80": "visibility of whole object is between 60 and 80%", + "v80-100": "visibility of whole object is between 80 and 100%", + "none": "visibility isn't available", + }, + ), + default_value="", + ) + + if osp.exists(osp.join(anno_dir, SampleAnnotationTable.FILENAME)): + self._sample_annotation_table = SampleAnnotationTable.from_json( + osp.join(anno_dir, SampleAnnotationTable.FILENAME) + ) + + if osp.exists(osp.join(anno_dir, ObjectAnnTable.FILENAME)): + self._object_ann_table = ObjectAnnTable.from_json( + osp.join(anno_dir, ObjectAnnTable.FILENAME) + ) + + if osp.exists(osp.join(anno_dir, SurfaceAnnTable.FILENAME)): + self._surface_ann_table = SurfaceAnnTable.from_json( + osp.join(anno_dir, SurfaceAnnTable.FILENAME) + ) diff --git a/perception_dataset/t4_dataset/classes/abstract_class.py b/perception_dataset/t4_dataset/classes/abstract_class.py index 563d3863..6eb762f4 100644 --- a/perception_dataset/t4_dataset/classes/abstract_class.py +++ 
b/perception_dataset/t4_dataset/classes/abstract_class.py @@ -24,9 +24,6 @@ def token(self, token: str): def to_dict(self) -> Dict[str, Any]: raise NotImplementedError() - def __eq__(self, value: T) -> bool: - return self.__dict__ == value.__dict__ - T = TypeVar("T", bound=AbstractRecord) @@ -46,10 +43,6 @@ def _to_record(self, **kwargs) -> T: raise NotImplementedError() def set_record_to_table(self, record: T): - same_tokens = [token for token, v in self._token_to_record.items() if v == record] - assert len(same_tokens) in (0, 1) - if len(same_tokens) == 1: - record.token = same_tokens[0] # overwrite record token with the existing one self._token_to_record[record.token] = record def insert_into_table(self, **kwargs) -> str: @@ -60,19 +53,6 @@ def insert_into_table(self, **kwargs) -> str: self.set_record_to_table(record) return record.token - def insert_from_json(self, filepath: str): - with open(filepath, "r") as f: - table_data: List[Dict[str, Any]] = json.load(f) - - for data in table_data: - token: str = data.pop("token") - record = self._to_record(**data) - record.token = token - assert isinstance( - record, AbstractRecord - ), "_to_record function must return the instance of RecordClass" - self.set_record_to_table(record) - def select_record_from_token(self, token: str) -> T: assert ( token in self._token_to_record @@ -97,3 +77,8 @@ def save_json(self, output_dir: str): table_data = self.to_data() with open(osp.join(output_dir, self.FILENAME), "w") as f: json.dump(table_data, f, indent=4) + + @classmethod + @abstractmethod + def from_json(cls, filepath: str): + raise NotImplementedError diff --git a/perception_dataset/t4_dataset/classes/attribute.py b/perception_dataset/t4_dataset/classes/attribute.py index 2b63cfcb..74561a02 100644 --- a/perception_dataset/t4_dataset/classes/attribute.py +++ b/perception_dataset/t4_dataset/classes/attribute.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict from 
perception_dataset.constants import EXTENSION_ENUM @@ -46,3 +49,22 @@ def get_token_from_name(self, name: str) -> str: self._name_to_token[name] = token return token + + @classmethod + def from_json( + cls, + filepath: str, + name_to_description: Dict[str, str], + default_value: str, + ) -> AttributeTable: + with open(filepath) as f: + items = json.load(f) + + table = cls(name_to_description=name_to_description, default_value=default_value) + + for item in items: + record = AttributeRecord(name=item["name"], description=item["description"]) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/calibrated_sensor.py b/perception_dataset/t4_dataset/classes/calibrated_sensor.py index d1574496..3d76fc4d 100644 --- a/perception_dataset/t4_dataset/classes/calibrated_sensor.py +++ b/perception_dataset/t4_dataset/classes/calibrated_sensor.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Any, Dict, List import numpy as np @@ -58,3 +61,31 @@ def __init__(self): def _to_record(self, **kwargs) -> CalibratedSensorRecord: return CalibratedSensorRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> CalibratedSensorTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = CalibratedSensorRecord( + sensor_token=item["sensor_token"], + translation={ + "x": item["translation"][0], + "y": item["translation"][1], + "z": item["translation"][2], + }, + rotation={ + "w": item["rotation"][0], + "x": item["rotation"][1], + "y": item["rotation"][2], + "z": item["rotation"][3], + }, + camera_intrinsic=item["camera_intrinsic"], + camera_distortion=item["camera_distortion"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/category.py b/perception_dataset/t4_dataset/classes/category.py index 655638da..89fdd38a 100644 --- 
a/perception_dataset/t4_dataset/classes/category.py +++ b/perception_dataset/t4_dataset/classes/category.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict from perception_dataset.constants import EXTENSION_ENUM @@ -46,3 +49,21 @@ def get_token_from_name(self, name: str) -> str: self._name_to_token[name] = token return token + + @classmethod + def from_json( + cls, + filepath: str, + name_to_description: Dict[str, str], + default_value: str, + ) -> CategoryTable: + with open(filepath) as f: + items = json.load(f) + + table = cls(name_to_description=name_to_description, default_value=default_value) + for item in items: + record = CategoryRecord(name=item["name"], description=item["description"]) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/ego_pose.py b/perception_dataset/t4_dataset/classes/ego_pose.py index 878703ff..22cd45ee 100644 --- a/perception_dataset/t4_dataset/classes/ego_pose.py +++ b/perception_dataset/t4_dataset/classes/ego_pose.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Any, Dict, Optional from perception_dataset.constants import EXTENSION_ENUM @@ -92,3 +95,59 @@ def __init__(self): def _to_record(self, **kwargs) -> EgoPoseRecord: return EgoPoseRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> EgoPoseTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = EgoPoseRecord( + translation={ + "x": item["translation"][0], + "y": item["translation"][1], + "z": item["translation"][2], + }, + rotation={ + "w": item["rotation"][0], + "x": item["rotation"][1], + "y": item["rotation"][2], + "z": item["rotation"][3], + }, + timestamp=item["timestamp"], + twist=( + { + "vx": item["twist"][0], + "vy": item["twist"][1], + "vz": item["twist"][2], + "yaw_rate": item["twist"][3], + "pitch_rate": item["twist"][4], + 
"roll_rate": item["twist"][5], + } + if item.get("twist") is not None + else None + ), + acceleration=( + { + "ax": item["acceleration"][0], + "ay": item["acceleration"][1], + "az": item["acceleration"][2], + } + if item.get("acceleration") is not None + else None + ), + geocoordinate=( + { + "latitude": item["geocoordinate"][0], + "longitude": item["geocoordinate"][1], + "altitude": item["geocoordinate"][2], + } + if item.get("geocoordinate") is not None + else None + ), + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/instance.py b/perception_dataset/t4_dataset/classes/instance.py index 2420d40a..1e6d0135 100644 --- a/perception_dataset/t4_dataset/classes/instance.py +++ b/perception_dataset/t4_dataset/classes/instance.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Any, Dict from perception_dataset.constants import EXTENSION_ENUM @@ -59,3 +62,19 @@ def get_token_from_id(self, instance_id: str, category_token: str, dataset_name: self._id_to_token[instance_id] = token return token + + @classmethod + def from_json(cls, filepath: str) -> InstanceTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = InstanceRecord( + category_token=item["category_token"], + instance_name=item.get("instance_name", ""), + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/log.py b/perception_dataset/t4_dataset/classes/log.py index f409c4b3..da241899 100644 --- a/perception_dataset/t4_dataset/classes/log.py +++ b/perception_dataset/t4_dataset/classes/log.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Any, Dict from perception_dataset.constants import EXTENSION_ENUM @@ -40,3 +43,21 @@ def __init__(self): def _to_record(self, **kwargs) -> LogRecord: return LogRecord(**kwargs) + + 
@classmethod + def from_json(cls, filepath: str) -> LogTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = LogRecord( + logfile=item["logfile"], + vehicle=item["vehicle"], + data_captured=item["data_captured"], + location=item["location"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/map.py b/perception_dataset/t4_dataset/classes/map.py index a0a5c725..613946d0 100644 --- a/perception_dataset/t4_dataset/classes/map.py +++ b/perception_dataset/t4_dataset/classes/map.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Any, Dict, List from perception_dataset.constants import EXTENSION_ENUM @@ -37,3 +40,20 @@ def __init__(self): def _to_record(self, **kwargs) -> MapRecord: return MapRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> MapTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = MapRecord( + log_tokens=item["log_tokens"], + category=item["category"], + filename=item["filename"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/object_ann.py b/perception_dataset/t4_dataset/classes/object_ann.py index 3b587a8f..c4022947 100644 --- a/perception_dataset/t4_dataset/classes/object_ann.py +++ b/perception_dataset/t4_dataset/classes/object_ann.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict, List from perception_dataset.constants import EXTENSION_ENUM @@ -69,3 +72,23 @@ def _to_record( mask=mask, ) return record + + @classmethod + def from_json(cls, filepath: str) -> ObjectAnnTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = ObjectAnnRecord( + sample_data_token=item["sample_data_token"], + 
instance_token=item["instance_token"], + category_token=item["category_token"], + attribute_tokens=item["attribute_tokens"], + bbox=item["bbox"], + mask=item["mask"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/sample.py b/perception_dataset/t4_dataset/classes/sample.py index aa70ec23..86ca58f9 100644 --- a/perception_dataset/t4_dataset/classes/sample.py +++ b/perception_dataset/t4_dataset/classes/sample.py @@ -1,3 +1,7 @@ +from __future__ import annotations + +import json + from perception_dataset.constants import EXTENSION_ENUM from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable @@ -38,3 +42,21 @@ def __init__(self): def _to_record(self, **kwargs) -> SampleRecord: return SampleRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> SampleTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = SampleRecord( + timestamp=item["timestamp"], + scene_token=item["scene_token"], + next_token=item["next"], + prev_token=item["prev"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/sample_annotation.py b/perception_dataset/t4_dataset/classes/sample_annotation.py index ba723f46..05dc948f 100644 --- a/perception_dataset/t4_dataset/classes/sample_annotation.py +++ b/perception_dataset/t4_dataset/classes/sample_annotation.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict, List, Optional from perception_dataset.constants import EXTENSION_ENUM @@ -144,3 +147,57 @@ def _to_record( num_radar_pts=num_radar_pts, ) return record + + @classmethod + def from_json(cls, filepath: str) -> SampleAnnotationTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = SampleAnnotationRecord( + 
sample_token=item["sample_token"], + instance_token=item["instance_token"], + attribute_tokens=item["attribute_tokens"], + visibility_token=item["visibility_token"], + translation={ + "x": item["translation"][0], + "y": item["translation"][1], + "z": item["translation"][2], + }, + velocity=( + { + "x": item["velocity"][0], + "y": item["velocity"][1], + "z": item["velocity"][2], + } + if item.get("velocity") is not None + else None + ), + acceleration=( + { + "x": item["acceleration"][0], + "y": item["acceleration"][1], + "z": item["acceleration"][2], + } + if item.get("acceleration") is not None + else None + ), + size={ + "width": item["size"][0], + "length": item["size"][1], + "height": item["size"][2], + }, + rotation={ + "w": item["rotation"][0], + "x": item["rotation"][1], + "y": item["rotation"][2], + "z": item["rotation"][3], + }, + num_lidar_pts=item["num_lidar_pts"], + num_radar_pts=item["num_radar_pts"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/sample_data.py b/perception_dataset/t4_dataset/classes/sample_data.py index 316aed7e..2327ba64 100644 --- a/perception_dataset/t4_dataset/classes/sample_data.py +++ b/perception_dataset/t4_dataset/classes/sample_data.py @@ -1,3 +1,7 @@ +from __future__ import annotations + +import json + from perception_dataset.constants import EXTENSION_ENUM from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable @@ -62,3 +66,29 @@ def __init__(self): def _to_record(self, **kwargs) -> SampleDataRecord: return SampleDataRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> SampleDataTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = SampleDataRecord( + sample_token=item["sample_token"], + ego_pose_token=item["ego_pose_token"], + calibrated_sensor_token=item["calibrated_sensor_token"], + filename=item["filename"], + 
fileformat=item["fileformat"], + timestamp=item["timestamp"], + is_key_frame=item["is_key_frame"], + width=item["width"], + height=item["height"], + next_token=item["next"], + prev_token=item["prev"], + is_valid=item["is_valid"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/scene.py b/perception_dataset/t4_dataset/classes/scene.py index 412216c4..1c0c186e 100644 --- a/perception_dataset/t4_dataset/classes/scene.py +++ b/perception_dataset/t4_dataset/classes/scene.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Any, Dict from perception_dataset.constants import EXTENSION_ENUM @@ -46,3 +49,23 @@ def __init__(self): def _to_record(self, **kwargs) -> SceneRecord: return SceneRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> SceneTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = SceneRecord( + name=item["name"], + description=item["description"], + log_token=item["log_token"], + nbr_samples=item["nbr_samples"], + first_sample_token=item["first_sample_token"], + last_sample_token=item["last_sample_token"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/sensor.py b/perception_dataset/t4_dataset/classes/sensor.py index aa7e7829..3c985b67 100644 --- a/perception_dataset/t4_dataset/classes/sensor.py +++ b/perception_dataset/t4_dataset/classes/sensor.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict from perception_dataset.constants import EXTENSION_ENUM @@ -46,3 +49,16 @@ def get_token_from_channel(self, channel: str): self._channel_to_token[channel] = token return token + + @classmethod + def from_json(cls, filepath: str, channel_to_modality: Dict[str, str]) -> SensorTable: + with open(filepath) as f: + items = json.load(f) + + 
table = cls(channel_to_modality=channel_to_modality) + for item in items: + record = SensorRecord(channel=item["channel"], modality=item["modality"]) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/surface_ann.py b/perception_dataset/t4_dataset/classes/surface_ann.py index d3ec94af..943a4c6e 100644 --- a/perception_dataset/t4_dataset/classes/surface_ann.py +++ b/perception_dataset/t4_dataset/classes/surface_ann.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict from perception_dataset.constants import EXTENSION_ENUM @@ -47,3 +50,20 @@ def _to_record( sample_data_token=sample_data_token, ) return record + + @classmethod + def from_json(cls, filepath: str) -> SurfaceAnnTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = SurfaceAnnRecord( + category_token=item["category_token"], + mask=item["mask"], + sample_data_token=item["sample_data_token"], + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/vehicle_state.py b/perception_dataset/t4_dataset/classes/vehicle_state.py index 07c9ae66..61cf4317 100644 --- a/perception_dataset/t4_dataset/classes/vehicle_state.py +++ b/perception_dataset/t4_dataset/classes/vehicle_state.py @@ -1,5 +1,6 @@ from __future__ import annotations +import json from typing import Any, Dict, Optional from perception_dataset.constants import EXTENSION_ENUM @@ -73,3 +74,26 @@ def __init__(self): def _to_record(self, **kwargs) -> VehicleStateRecord: return VehicleStateRecord(**kwargs) + + @classmethod + def from_json(cls, filepath: str) -> VehicleStateTable: + with open(filepath) as f: + items = json.load(f) + + table = cls() + for item in items: + record = VehicleStateRecord( + timestamp=item["timestamp"], + accel_pedal=item.get("accel_pedal"), + 
brake_pedal=item.get("brake_pedal"), + steer_pedal=item.get("steer_pedal"), + steering_tire_angle=item.get("steering_tire_angle"), + steering_wheel_angle=item.get("steering_wheel_angle"), + shift_state=item.get("shift_state"), + indicators=item.get("indicators"), + additional_info=item.get("additional_info"), + ) + record.token = item["token"] + table.set_record_to_table(record) + + return table diff --git a/perception_dataset/t4_dataset/classes/visibility.py b/perception_dataset/t4_dataset/classes/visibility.py index 42e3410f..b7b405e6 100644 --- a/perception_dataset/t4_dataset/classes/visibility.py +++ b/perception_dataset/t4_dataset/classes/visibility.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +import json from typing import Dict from perception_dataset.constants import EXTENSION_ENUM @@ -46,3 +49,21 @@ def get_token_from_level(self, level: str) -> str: self._level_to_token[level] = token return token + + @classmethod + def from_json( + cls, + filepath: str, + level_to_description: Dict[str, str], + default_value: str, + ) -> VisibilityTable: + with open(filepath) as f: + items = json.load(f) + + table = cls(level_to_description=level_to_description, default_value=default_value) + for item in items: + record = VisibilityRecord(level=item["level"], description=item["description"]) + record.token = item["token"] + table.set_record_to_table(record) + + return table